query: string (length 9 – 3.4k)
document: string (length 9 – 87.4k)
metadata: dict
negatives: list (length 4 – 101)
negative_scores: list (length 4 – 101)
document_score: string (length 3 – 10)
document_rank: string (102 distinct values)
The list of replica set and standalone instances is returned when the DBInstanceType parameter is set to its default value, replicate. To query the list of sharded cluster instances, you must set the DBInstanceType parameter to sharding.
def describe_dbinstances_with_options(
    self,
    request: dds_20151201_models.DescribeDBInstancesRequest,
    runtime: util_models.RuntimeOptions,
) -> dds_20151201_models.DescribeDBInstancesResponse:
    UtilClient.validate_model(request)
    query = {}
    if not UtilClient.is_unset(request.charge_type):
        query['ChargeType'] = request.charge_type
    if not UtilClient.is_unset(request.connection_domain):
        query['ConnectionDomain'] = request.connection_domain
    if not UtilClient.is_unset(request.dbinstance_class):
        query['DBInstanceClass'] = request.dbinstance_class
    if not UtilClient.is_unset(request.dbinstance_description):
        query['DBInstanceDescription'] = request.dbinstance_description
    if not UtilClient.is_unset(request.dbinstance_id):
        query['DBInstanceId'] = request.dbinstance_id
    if not UtilClient.is_unset(request.dbinstance_status):
        query['DBInstanceStatus'] = request.dbinstance_status
    if not UtilClient.is_unset(request.dbinstance_type):
        query['DBInstanceType'] = request.dbinstance_type
    if not UtilClient.is_unset(request.dbnode_type):
        query['DBNodeType'] = request.dbnode_type
    if not UtilClient.is_unset(request.engine):
        query['Engine'] = request.engine
    if not UtilClient.is_unset(request.engine_version):
        query['EngineVersion'] = request.engine_version
    if not UtilClient.is_unset(request.expire_time):
        query['ExpireTime'] = request.expire_time
    if not UtilClient.is_unset(request.expired):
        query['Expired'] = request.expired
    if not UtilClient.is_unset(request.network_type):
        query['NetworkType'] = request.network_type
    if not UtilClient.is_unset(request.owner_account):
        query['OwnerAccount'] = request.owner_account
    if not UtilClient.is_unset(request.owner_id):
        query['OwnerId'] = request.owner_id
    if not UtilClient.is_unset(request.page_number):
        query['PageNumber'] = request.page_number
    if not UtilClient.is_unset(request.page_size):
        query['PageSize'] = request.page_size
    if not UtilClient.is_unset(request.region_id):
        query['RegionId'] = request.region_id
    if not UtilClient.is_unset(request.replication_factor):
        query['ReplicationFactor'] = request.replication_factor
    if not UtilClient.is_unset(request.resource_group_id):
        query['ResourceGroupId'] = request.resource_group_id
    if not UtilClient.is_unset(request.resource_owner_account):
        query['ResourceOwnerAccount'] = request.resource_owner_account
    if not UtilClient.is_unset(request.resource_owner_id):
        query['ResourceOwnerId'] = request.resource_owner_id
    if not UtilClient.is_unset(request.security_token):
        query['SecurityToken'] = request.security_token
    if not UtilClient.is_unset(request.tag):
        query['Tag'] = request.tag
    if not UtilClient.is_unset(request.v_switch_id):
        query['VSwitchId'] = request.v_switch_id
    if not UtilClient.is_unset(request.vpc_id):
        query['VpcId'] = request.vpc_id
    if not UtilClient.is_unset(request.zone_id):
        query['ZoneId'] = request.zone_id
    req = open_api_models.OpenApiRequest(
        query=OpenApiUtilClient.query(query)
    )
    params = open_api_models.Params(
        action='DescribeDBInstances',
        version='2015-12-01',
        protocol='HTTPS',
        pathname='/',
        method='POST',
        auth_type='AK',
        style='RPC',
        req_body_type='formData',
        body_type='json'
    )
    return TeaCore.from_map(
        dds_20151201_models.DescribeDBInstancesResponse(),
        self.call_api(params, req, runtime)
    )
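As a usage sketch tied to the query above, illustrative only: it assumes `client` is an already-initialized SDK client exposing the `describe_dbinstances` convenience wrapper that appears among this row's negatives, and the region ID is a hypothetical placeholder. Setting DBInstanceType to sharding lists sharded cluster instances instead of the default replica set and standalone list.

# Illustrative sketch, not part of the dataset row. Assumes `client` is an
# initialized SDK client (construction omitted) and 'cn-hangzhou' is a
# placeholder region ID.
request = dds_20151201_models.DescribeDBInstancesRequest(
    region_id='cn-hangzhou',      # hypothetical region
    dbinstance_type='sharding',   # default 'replicate' returns replica set / standalone instances
    page_size=30,
)
response = client.describe_dbinstances(request)  # wrapper that builds RuntimeOptions internally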
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_instances(cls, args, config):\n instance_list = config.get_all_instances()\n if len(instance_list) > 0:\n table_data = []\n for i in instance_list:\n provider_obj = config.get_object_by_id(i.provider_id, 'Provider')\n if provider_obj is None:\n continue\n provider_name = provider_obj.name\n #print \"provider_obj.type\",provider_obj.type\n if i.worker_group_id is not None:\n name = config.get_object_by_id(i.worker_id, 'WorkerGroup').name\n itype = 'worker'\n else:\n name = config.get_object_by_id(i.controller_id, 'Controller').name\n itype = 'controller'\n table_data.append([i.id, provider_name, i.provider_instance_identifier, itype, name])\n table_print(['ID', 'provider', 'instance id', 'type', 'name'], table_data)\n else:\n print \"No instance found\"", "def getinstancelist():\n dbcursor_dict.execute(dbq.get_all_instance_list, )\n db_instance_list = dbcursor_dict.fetchall()\n return db_instance_list", "def list_(args):\n\n # Get Config.py\n cloud = get_current_cloud(args.cloud)\n\n instances = cloud.list_instances()\n print_table(print_instance_summary, headers, instances,\n use_color=args.color)\n return instances", "def list_instances(self):\n # list instances\n self._list_instances()", "def list_instances(self):\n print '# AWS EC2 instances'\n self.compute.list_instances()", "def list_instances():\n if request.method == \"GET\":\n return render_template(\"instances.html\")", "def show_instances():\n return get_instances()", "def list_instances(self):\n LOG.debug(\"list_instances\")\n\n instance_ids = []\n bmms = db.bmm_get_all(None)\n for bmm in bmms:\n if not bmm[\"instance_id\"]:\n continue\n instance_ids.append(self._instance_id_to_name(bmm[\"instance_id\"]))\n\n return instance_ids", "def list_instances(self):\n\n response = self.client.service.instances().aggregatedList(\n project=self.client.project_id).execute()\n\n zones = response.get('items', {})\n instances = []\n for zone in zones.values():\n for instance in zone.get('instances', []):\n instances.append(instance)\n\n return instances", "def list_instances():\n print(str_sep)\n print(\"Listing EC2 instances from [{}] region.\".format(SESSION.region_name))\n print(\"{:20s}{:15s}{:10s}{}\".format(\"ID\", \"TYPE\", \"STATE\", \"NAME\"))\n print(str_sep)\n\n try:\n for instance in EC2_MANAGER.list_instances():\n # get the instance name in the tags list\n name = next((item for item in instance.tags if item[\"Key\"] == \"Name\"),\n {'Key': 'Name', 'Value': 'None'})\n\n print(\"{:20s}{:15s}{:10s}{}\".format(instance.id,\n instance.instance_type,\n instance.state['Name'],\n name['Value']))\n except ClientError as e:\n ErrManager.err_manager(e)\n\n print(str_sep)", "def get_dbservers(self):\n ret = []\n for i in self.all_instances:\n if i.is_dbserver():\n ret.append(i)\n return ret", "def describe_dbinstances(\n self,\n request: dds_20151201_models.DescribeDBInstancesRequest,\n ) -> dds_20151201_models.DescribeDBInstancesResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_dbinstances_with_options(request, runtime)", "def list(self, instance, limit=None, marker=None):\n return self._list(\"/instances/%s/databases\" % base.getid(instance),\n \"databases\", limit, marker)", "def get_instance_list():\n return parse_list_output(Popen('nova list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])", "def get_instances() -> dict:\n url = f\"{app.config['COMPUTE_SERVERS_REF']}/detail\"\n instances_rq = request(\n method=\"GET\", url=url, headers=build_header(), params={\"vm_state\": 
\"active\"},\n )\n\n if not instances_rq.ok:\n HTTPError(instances_rq.status_code)\n\n answer = {\"servers\": list()}\n for instance in instances_rq.json()[\"servers\"]:\n instance_info = dict(name=instance[\"name\"])\n instance_info[\"ip_addresses\"] = list()\n for network, info in instance[\"addresses\"].items():\n instance_info[\"ip_addresses\"].extend(entry[\"addr\"] for entry in info)\n answer[\"servers\"].append(instance_info)\n\n return answer", "def list_aws_instances(verbose=False, state='all'):\n conn = get_ec2_connection()\n\n reservations = conn.get_all_reservations()\n instances = []\n for res in reservations:\n for instance in res.instances:\n if state == 'all' or instance.state == state:\n instance = {\n 'id': instance.id,\n 'type': instance.instance_type,\n 'image': instance.image_id,\n 'state': instance.state,\n 'instance': instance,\n }\n instances.append(instance)\n env.instances = instances\n if verbose:\n import pprint\n pprint.pprint(env.instances)", "def list_instances():\n js = _get_jetstream_conn()\n il = js.compute.instances.list()\n if not il:\n msg = \"You don't have any instances available.\"\n else:\n msg = (\"You have {0} instances available. Here are up to 3 most \"\n \"recent: \".format(len(il)))\n msg_ex = \"\"\n content = \"\"\n for i in il[:3]:\n msg_ex += \"{0},\".format(i.name)\n content += \"{0} ({1})\\n\".format(\n i.name, i.public_ips[0] if i.public_ips else i.private_ips[0])\n return statement(msg + msg_ex).simple_card(title=msg, content=content)", "def describe_dbinstances_overview(\n self,\n request: dds_20151201_models.DescribeDBInstancesOverviewRequest,\n ) -> dds_20151201_models.DescribeDBInstancesOverviewResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_dbinstances_overview_with_options(request, runtime)", "def list_running_instances(self):\n print '# Running AWS EC2 instances'\n self.compute.list_running_instances()", "def instances(self, **query):\n return self._list(_instance.Instance, **query)", "def list_databases(self, instance, limit=None, marker=None):\n return instance.list_databases(limit=limit, marker=marker)", "def describe_rds_instances(rds, account, region, output_bucket):\n rds_list = rds.describe_db_instances().get('DBInstances')\n\n for rds_obj in rds_list:\n #print rds_obj\n output_bucket.append(misc.format_line((\n misc.check_if(account.get('name')),\n misc.check_if(region.get('RegionName')),\n misc.check_if(rds_obj.get('DBSubnetGroup').get('VpcId')),\n misc.check_if(rds_obj.get('DBInstanceIdentifier')),\n misc.check_if(rds_obj.get('DBInstanceClass')),\n misc.check_if(str(rds_obj.get('PubliclyAccessible'))),\n misc.check_if(rds_obj.get('Endpoint').get('Address')),\n misc.lookup(rds_obj.get('Endpoint').get('Address')),\n misc.check_if(str(rds_obj.get('Endpoint').get('Port')))\n )))", "def show_all_instances(self):\n if not self.all_instances:\n logging.error(\"%s: no instances detected\", self.name)\n return\n instances = \"\"\n for instance in self.all_instances:\n instances += \" - {0.name} (pid: {0.pid})\".format(instance)\n logging.info(\"arangod instances for starter: %s - %s\", self.name, instances)", "def instance_list(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"instance_list\")", "def list_instances(self):\n instances = []\n try:\n pages = self.compute.virtual_machines.list(\n CONF.azure.resource_group)\n except Exception as e:\n msg = six.text_type(e)\n LOG.exception(msg)\n ex = exception.InstanceListFailure(reason=six.text_type(e))\n raise ex\n else:\n if pages:\n for i 
in pages:\n instances.append(i.name)\n return instances", "def list_ebss_by_instance():\n\n ec2 = u.create_ec2_resource()\n instances = [(u.seconds_from_datetime(i.launch_time), i) for i in ec2.instances.all()]\n sorted_instances = sorted(instances, key=itemgetter(0))\n\n for (seconds, instance) in sorted_instances:\n\n volumes = instance.volumes.all()\n volume_strs = []\n for v in volumes:\n volume_strs.append(\"%s (%s)\"%(v.id, v.size))\n print(\"%s: %s\" % (u.get_name(instance.tags), ','.join(volume_strs)))", "def index(self, req, instance_id):\n LOG.info(\"Call to Databases index - %s\", instance_id)\n LOG.debug(\"%s - %s\", req.environ, req.body)\n local_id = dbapi.localid_from_uuid(instance_id)\n ctxt = req.environ['nova.context']\n common.instance_available(ctxt, instance_id, local_id, self.compute_api)\n try:\n result = self.guest_api.list_databases(ctxt, local_id)\n except Exception as err:\n LOG.error(err)\n raise exception.InstanceFault(\"Unable to get the list of databases\")\n LOG.debug(\"LIST DATABASES RESULT - %s\", str(result))\n databases = {'databases':[]}\n for database in result:\n mysql_database = models.MySQLDatabase()\n mysql_database.deserialize(database)\n databases['databases'].append({'name': mysql_database.name})\n LOG.debug(\"LIST DATABASES RETURN - %s\", databases)\n return databases", "def list_instance_uuids(self):\n return self.list_instances()", "def _getAllRunningInstances(self):\n return self._ec2.get_only_instances(filters={\n 'tag:leader_instance_id': self._instanceId,\n 'instance-state-name': 'running'})", "def list_as_instances(access_key, secret_key, region, autoscaling_group):\n\n aws_as = init_aws_as_conn(access_key, secret_key, region)\n aws_ec2 = init_aws_ec2_conn(access_key, secret_key, region)\n autoscaling_instances = []\n\n vm = aws_as.get_all_groups([autoscaling_group])\n autoscaling_instances_id = [j.instance_id for i in vm for j in i.instances]\n\n for instance_id in autoscaling_instances_id:\n vm = boto.ec2.instance.Instance(aws_ec2)\n vm.id = instance_id\n vm.update()\n autoscaling_instances.append(vm)\n\n return autoscaling_instances", "def query_db_cluster(instance_id):\n try:\n response = RDS.describe_db_instances(\n DBInstanceIdentifier=instance_id\n )\n return response['DBInstances'][0]['DBClusterIdentifier']\n except KeyError:\n db_subnet = response['DBInstances'][0]['DBSubnetGroup']['DBSubnetGroupName']\n return [False, db_subnet]", "def databases(request): # pylint: disable=unused-argument\n response = InstancesAccess.get_all()\n # By default, JsonResponse refuse to serialize a list to a Json list. safe=False allow it.\n return JsonResponse(response, safe=False)", "def databases(self, instance, **query):\n instance = self._get_resource(_instance.Instance, instance)\n return self._list(_database.Database, instance_id=instance.id, **query)", "def list_instances_detail(self):\n\n # TODO(imsplitbit): need to ask around if this is the best way to do\n # this. This causes some redundant vzlist commands as get_info is run\n # on every item returned from this command but it didn't make sense\n # to re-implement get_info as get_info_all.\n infos = []\n try:\n # get a list of CT names which will be nova friendly.\n # NOTE: This can be an issue if nova decides to change\n # the format of names. 
We would need to have a migration process\n # to change the names in the name field of the CTs.\n out, err = utils.execute('sudo', 'vzlist', '--all', '-o',\n 'name', '-H')\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Problem listing Vzs')\n\n for name in out.splitlines():\n name = name.split()[0]\n status = self.get_info(name)\n infos.append(driver.InstanceInfo(name, status['state']))\n\n return infos", "def instances(self):\n return self.get('instances')", "def list_instances(self):\n nodes = self._driver.list_nodes()\n return [[n.name, n.state, n.public_ips] for n in nodes]", "def do_instance_show(cs, args):\n try:\n instance = cs.instances.detail(args.instance)\n except exceptions.NotFound as e:\n msg = \"No server with an id of '%s' exists\" % args.instance\n e.message = msg\n raise\n\n _print_server_details(instance)", "def instances(self):\r\n # It would be more efficient to do this with filters now\r\n # but not all services that implement EC2 API support filters.\r\n instances = []\r\n rs = self.connection.get_all_instances()\r\n for reservation in rs:\r\n uses_group = [g.name for g in reservation.groups if g.name == self.name]\r\n if uses_group:\r\n instances.extend(reservation.instances)\r\n return instances", "def GetInstances(self, bulk=False, reason=None):\n query = []\n _AppendIf(query, bulk, (\"bulk\", 1))\n _AppendReason(query, reason)\n\n instances = self._SendRequest(HTTP_GET,\n \"/%s/instances\" % GANETI_RAPI_VERSION,\n query, None)\n if bulk:\n return instances\n else:\n return [i[\"id\"] for i in instances]", "def describe_rds_db_instances(StackId=None, RdsDbInstanceArns=None):\n pass", "def listInstancesRegionZone(region,zone):\n\tprint \"-\"*80\n\tprint \"# Region :\",region,\" Zone\", zone\t\n\tprint \"-\"*80\n\tinstances = getInstancesRegionZone(region,zone)\n\tif instances:\n\t\tfor instance in instances:\n\t\t\tprint \"[\",instance.ami_launch_index,\"]\",instance.ip_address,\" (\",instance.private_ip_address,\") \",instance.instance_type,\" key=\",instance.key_name", "def describe_instances():\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Describe instances\n instances = ec2_resource.instances.all()\n for instance in instances:\n print('State of the instance \"' + instance.id + '\" is: \"' + instance.state['Name'] + '\"')\n return", "def list_instances_detail(self, context):\n LOG.debug(\"list_instances_detail\")\n\n info_list = []\n bmms = db.bmm_get_all_by_instance_id_not_null(context)\n for bmm in bmms:\n instance = db.instance_get(context, bmm[\"instance_id\"])\n status = PowerManager(bmm[\"ipmi_ip\"]).status()\n if status == \"off\":\n inst_power_state = power_state.SHUTOFF\n\n if instance[\"vm_state\"] == vm_states.ACTIVE:\n db.instance_update(context, instance[\"id\"], {\"vm_state\": vm_states.STOPPED})\n else:\n inst_power_state = power_state.RUNNING\n\n if instance[\"vm_state\"] == vm_states.STOPPED:\n db.instance_update(context, instance[\"id\"], {\"vm_state\": vm_states.ACTIVE})\n\n info_list.append(driver.InstanceInfo(self._instance_id_to_name(bmm[\"instance_id\"]), \n inst_power_state))\n\n return info_list", "def instances(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstanceStatusArgs']]]]:\n return pulumi.get(self, \"instances\")", "def do_instance_list(cs, args):\n instances = cs.instances.list()\n\n fields = [\"OCCI ID\"]\n if args.detailed:\n fields.extend([\"Name\", \"State\", \"Network\"])\n occi_attrs = 
(\"occi.compute.hostname\",\n \"occi.compute.state\")\n\n pt = prettytable.PrettyTable([f for f in fields], caching=False)\n pt.align = 'l'\n\n for instance in instances:\n row = []\n attrs = instance.get('attributes', {})\n instance_id = attrs.get('occi.core.id', None)\n row.append(instance_id)\n\n if args.detailed and instance_id:\n if not all([i in attrs for i in occi_attrs]):\n instance = cs.instances.detail(instance_id)\n attrs = instance.get('attributes', {})\n\n name = attrs.get(\"occi.core.title\", None)\n if name is None:\n name = attrs.get(\"occi.compute.hostname\", None)\n row.append(name)\n row.append(attrs.get(\"occi.compute.state\", None))\n\n links = instance.get(\"links\", [])\n network = []\n for link in links:\n if occi.CATEGORIES[\"network\"] in link[\"kind\"][\"related\"]:\n # get IPv4\n ip = link[\"attributes\"].get(\n \"occi.networkinterface.address\",\n None\n )\n if not ip:\n ip = link[\"attributes\"].get(\n \"occi.networkinterface.ip6\",\n None\n )\n network.append(ip)\n row.append(network)\n\n pt.add_row(row)\n\n print(pt.get_string())", "def get_instances(self):\n connection = self.connection\n\n instances = []\n\n connection.row_factory = sqlite3.Row\n cur = connection.cursor()\n cur.execute(\"SELECT * FROM INSTANCES\")\n rows = cur.fetchall()\n columns = [str(i[0]).lower() for i in cur.description]\n for row in rows:\n object = dict(zip(columns, row))\n instances.append(object)\n\n instancesNoneDict = {}\n\n for instance in instances:\n if instance['harvesterid'] not in instancesNoneDict:\n instancesNoneDict[instance['harvesterid']] = {}\n if instance['harvesterhost'] not in instancesNoneDict[instance['harvesterid']]:\n instancesNoneDict[instance['harvesterid']][instance['harvesterhost']] = {\n 'availability': instance['availability'], 'errorsdesc': instance['errorsdesc'],\n 'contacts': instance['contacts'].split(','),\n 'active': instance['active'], 'notificated': instance['notificated']}\n elif instance['harvesterid'] in instancesNoneDict:\n if instance['harvesterhost'] not in instancesNoneDict[instance['harvesterid']]:\n instancesNoneDict[instance['harvesterid']][instance['harvesterhost']] = {\n 'availability': instance['availability'], 'errorsdesc': instance['errorsdesc'],\n 'contacts': instance['contacts'].split(','),\n 'active': instance['active'], 'notificated': instance['notificated']}\n if 'none' in instancesNoneDict[instance['harvesterid']]:\n del instancesNoneDict[instance['harvesterid']]['none']\n return instancesNoneDict", "def list_instances(self):\n instances = utils.list_instances(self.compute_client,\n drv_conf.resource_group)\n\n self._uuid_to_omni_instance.clear()\n instance_names = []\n for instance in instances:\n openstack_id = None\n if instance.tags and 'openstack_id' in instance.tags:\n openstack_id = instance.tags['openstack_id']\n if openstack_id is None:\n openstack_id = self._get_uuid_from_omni_id(instance.name)\n self._uuid_to_omni_instance[openstack_id] = instance\n instance_names.append(instance.name)\n return instance_names", "def list_instance_name():\n\n if request.method == \"GET\":\n with lock:\n names = list(instances.keys())\n return jsonify(names)\n return Response(status=200)", "def list(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n print(vmrun.listSnapshots())", "def _list_backups_for_instance(self, instance, marker=0, limit=20):\n uri = \"/%s/%s/backups?limit=%d&marker=%d\" % 
(self.uri_base,\n utils.get_id(instance),\n int(limit),\n int(marker))\n resp, resp_body = self.api.method_get(uri)\n mgr = self.api._backup_manager\n return [CloudDatabaseBackup(mgr, backup)\n for backup in resp_body.get(\"backups\")]", "def get_instances(cls):\n raise NotImplementedError", "def instances(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"instances\")", "def describe_dbinstances_overview_with_options(\n self,\n request: dds_20151201_models.DescribeDBInstancesOverviewRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstancesOverviewResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.charge_type):\n query['ChargeType'] = request.charge_type\n if not UtilClient.is_unset(request.engine_version):\n query['EngineVersion'] = request.engine_version\n if not UtilClient.is_unset(request.instance_class):\n query['InstanceClass'] = request.instance_class\n if not UtilClient.is_unset(request.instance_ids):\n query['InstanceIds'] = request.instance_ids\n if not UtilClient.is_unset(request.instance_status):\n query['InstanceStatus'] = request.instance_status\n if not UtilClient.is_unset(request.instance_type):\n query['InstanceType'] = request.instance_type\n if not UtilClient.is_unset(request.network_type):\n query['NetworkType'] = request.network_type\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.region_id):\n query['RegionId'] = request.region_id\n if not UtilClient.is_unset(request.resource_group_id):\n query['ResourceGroupId'] = request.resource_group_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.v_switch_id):\n query['VSwitchId'] = request.v_switch_id\n if not UtilClient.is_unset(request.vpc_id):\n query['VpcId'] = request.vpc_id\n if not UtilClient.is_unset(request.zone_id):\n query['ZoneId'] = request.zone_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstancesOverview',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstancesOverviewResponse(),\n self.call_api(params, req, runtime)\n )", "def list_rds(region, filter_by_kwargs):\n conn = boto.rds.connect_to_region(region)\n instances = conn.get_all_dbinstances()\n return lookup(instances, filter_by=filter_by_kwargs)", "def get_instances(self):\n for server in self.cs.servers.list():\n match = self.cluster_re.match(server.name)\n if match:\n for ip in server.networks['public']:\n if ip.count('.'):\n v4ip = ip\n yield (match.group('role'), v4ip)", "def get_instance_classes():\n return Base_Instance.instance_classes", "def InstancesMultiAlloc(self, instances, reason=None, **kwargs):\n query = []\n body = {\n \"instances\": instances,\n }\n self._UpdateWithKwargs(body, **kwargs)\n\n _AppendDryRunIf(query, kwargs.get(\"dry_run\"))\n _AppendReason(query, reason)\n\n 
return self._SendRequest(HTTP_POST,\n \"/%s/instances-multi-alloc\" % GANETI_RAPI_VERSION,\n query, body)", "def list(self, instance=None, limit=20, marker=0):\n if instance is None:\n return super(CloudDatabaseBackupManager, self).list()\n return self.api._manager._list_backups_for_instance(instance, limit=limit,\n marker=marker)", "def get_all_instances(self, instance_ids=None, filters=None):\r\n params = {}\r\n if instance_ids:\r\n self.build_list_params(params, instance_ids, 'InstanceId')\r\n if filters:\r\n if 'group-id' in filters:\r\n warnings.warn(\"The group-id filter now requires a security \"\r\n \"group identifier (sg-*) instead of a group \"\r\n \"name. To filter by group name use the \"\r\n \"'group-name' filter instead.\", UserWarning)\r\n self.build_filter_params(params, filters)\r\n return self.get_list('DescribeInstances', params,\r\n [('item', Reservation)], verb='POST')", "def instances(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BrokerInstanceArgs']]]]:\n return pulumi.get(self, \"instances\")", "def instances(request): #pylint: disable=unused-argument\n response = list()\n # Retrieve filtered parameter on GET. It's used to display all instances or just user instances\n # In all cases, if user is not superuser, only user instances are displayed\n if 'filtered' in request.GET:\n filtered = ast.literal_eval(request.GET['filtered'])\n else:\n # By default, we filter based on user_id\n filtered = True\n # By default, we just list user instances, not all instances\n if not request.user.is_superuser:\n # If user is superuser and user are requesting admin view of instances\n # We ask a full list of instances\n filtered = True\n\n # We retrieve data from backend\n data_instances = BACKEND.instances(request, filtered)\n data_users = BACKEND.users()\n data_projects = BACKEND.projects()\n\n # We merge user and project information w/ instances\n for data_instance in data_instances:\n try:\n project = \"{name}\".format(\n **data_projects[data_instances[data_instance]['project_id']])\n except KeyError:\n project = data_instances[data_instance]['project_id']\n\n try:\n user = \"{name}\".format(\n **data_users[data_instances[data_instance]['user_id']])\n except KeyError:\n user = data_instances[data_instance]['user_id']\n response.append({\n 'id': data_instances[data_instance]['id'],\n 'name': data_instances[data_instance]['name'],\n 'created_at': data_instances[data_instance]['created_at'],\n 'lease_end': data_instances[data_instance]['lease_end'],\n 'project': project,\n 'user': user\n })\n return JsonResponse(response, safe=False)", "def list_test_instances():\n run('ls -1 %s' % env.site_root)", "def get_instance_templates(self):\n response = self.call_api('/global/instanceTemplates')\n return {\n template['name']: template for template in response.get('items', [])\n }", "def configure_cluster(ctx, zone, db_instance_name):\n ctx.run(init_pg_servers_play_run(zone, db_instance_name), pty=True, echo=True)", "def _get_instances(instance_tags, region):\n return ec2_conn[region].get_all_instances(filters={\"tag:Name\": instance_tags})", "def _instancelist(self):\n\n rv = []\n self.iname = {}\n for resv in self.conn.get_all_reservations():\n for inst in resv.instances:\n if inst.state != 'terminated':\n name = inst.tags.get('Name',None)\n rv.append([inst.id,inst.state])\n if name is not None:\n rv.append([name,inst.state])\n else:\n rv.append([inst.id+'-needsName',inst.state])\n self.iname[name] = inst.id\n self.iname[inst.id] = inst.id\n return rv", "def instances(self):\n if 
\"instances\" in self._prop_dict:\n return InstancesCollectionPage(self._prop_dict[\"instances\"])\n else:\n return None", "def test_list_ec2_instances(self):\n instances = [e for e in list_ec2_instances()]\n self.assertEqual([], instances)", "def _list_snapshots(self):\n return self.resource.describe_snapshots(\n Filters=[\n {\n 'Name': 'tag:CreatedBy',\n 'Values': [\n 'AutomatedBackup{}'.format(INTERVAL_TYPE.capitalize())\n ]\n }\n ]\n )", "def list_notebook_instances(NextToken=None, MaxResults=None, SortBy=None, SortOrder=None, NameContains=None, CreationTimeBefore=None, CreationTimeAfter=None, LastModifiedTimeBefore=None, LastModifiedTimeAfter=None, StatusEquals=None, NotebookInstanceLifecycleConfigNameContains=None, DefaultCodeRepositoryContains=None, AdditionalCodeRepositoryEquals=None):\n pass", "def instances(self) -> pulumi.Output[Sequence['outputs.BrokerInstance']]:\n return pulumi.get(self, \"instances\")", "async def describe_dbinstances_async(\n self,\n request: dds_20151201_models.DescribeDBInstancesRequest,\n ) -> dds_20151201_models.DescribeDBInstancesResponse:\n runtime = util_models.RuntimeOptions()\n return await self.describe_dbinstances_with_options_async(request, runtime)", "def get_datasource_instances(connection, ids=None, database_type=None, application=None,\n error_msg=None):\n application = application.id if isinstance(application, Application) else application\n application_provided = application is not None\n\n if application_provided:\n url = f\"{connection.base_url}/api/projects/{application}/datasources\"\n response = connection.session.get(url=url)\n else:\n database_type = None if database_type is None else database_type.join(\",\")\n ids = None if ids is None else ids.join(\",\")\n url = f\"{connection.base_url}/api/datasources\"\n response = connection.session.get(url=url, params={\n 'id': ids,\n 'database.type': database_type\n })\n if not response.ok:\n res = response.json()\n if application_provided and res.get(\"message\") == \"HTTP 404 Not Found\":\n # aka application based endpoint not supported\n # try without filtering\n warning_msg = (\"get_datasource_instances() warning: filtering by Application \"\n \"is not yet supported on this version of the I-Server. 
\"\n \"Returning all values.\")\n exception_handler(warning_msg, Warning, 0)\n return get_datasource_instances(connection=connection, ids=ids,\n database_type=database_type, error_msg=error_msg)\n if error_msg is None:\n if application_provided \\\n and res.get('code') == \"ERR006\" \\\n and \"not a valid value for Project ID\" in res.get('message'):\n error_msg = f\"{application} is not a valid Application class instance or ID\"\n raise ValueError(error_msg)\n error_msg = \"Error getting Datasource Instances\"\n if application_provided:\n error_msg += f\" within `{application}` Application\"\n response_handler(response, error_msg)\n response = alter_instance_list_resp(response)\n return response", "def preferred_instance_groups(self):\n if not self.unified_job_template:\n return []\n return list(self.unified_job_template.instance_groups.all())", "def instance_view(self) -> 'outputs.DedicatedHostInstanceViewResponse':\n return pulumi.get(self, \"instance_view\")", "def get_instances(ebs_support: str) -> Dict[str, str]:\n results = {}\n paginator = EC2_CLIENT.get_paginator(\"describe_instance_types\")\n resp_itr = paginator.paginate(\n Filters=[{\"Name\": \"ebs-info.ebs-optimized-support\", \"Values\": [ebs_support]}],\n )\n\n _type = \"false\" if ebs_support == \"unsupported\" else \"true\"\n for instances in resp_itr:\n for inst in instances.get(\"InstanceTypes\"):\n results[inst[\"InstanceType\"]] = _type\n return results", "def list(self, filters: dict = None, state: str = None, exclude: str = None) -> list:\n date_format = '%Y-%m-%d %H:%M:%S'\n self.instances = self.ec2.instances.all()\n\n # TOREMOVE\n def __all_instances():\n # all instances without filtering\n self.instances = [\n {\n 'InstanceId': instance.id,\n 'State': instance.state['Name'],\n 'Type': instance.instance_type,\n 'VpcId': instance.vpc_id,\n 'KeyName': instance.key_name,\n 'Tags': instance.tags,\n 'StartedAt': instance.launch_time.strftime(date_format)\n }\n for instance in self.instances\n ]\n\n if state:\n try:\n self.instances = self.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': [state]}])\n except IOError as e:\n raise EC2Error('Error listing instances by state {0} {1}'.format(state, e))\n\n if filters:\n # convert string into dict\n filters = literal_eval(filters)\n try:\n if not self.instances:\n self.instances = self.ec2.instances.all()\n\n self.instances = self.instances.filter(Filters=[{'Name': filters['Name'], 'Values': filters['Values']}])\n except IOError as e:\n raise EC2Error('Error listing instances with filters {0} {1}'.format(filters, e))\n\n if exclude:\n instances = []\n for i in self.instances:\n if i.id not in exclude:\n instances.append(i)\n return [\n {\n 'InstanceId': instance.id,\n 'State': instance.state['Name'],\n 'Type': instance.instance_type,\n 'VpcId': instance.vpc_id,\n 'KeyName': instance.key_name,\n 'Tags': instance.tags,\n 'StartedAt': instance.launch_time.strftime(date_format)\n }\n for instance in instances\n ]\n else:\n return [\n {\n 'InstanceId': instance.id,\n 'State': instance.state['Name'],\n 'Type': instance.instance_type,\n 'VpcId': instance.vpc_id,\n 'KeyName': instance.key_name,\n 'Tags': instance.tags,\n 'StartedAt': instance.launch_time.strftime(date_format)\n }\n for instance in self.instances\n ]", "def get_instances(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_instances = []\n reservations = conn.get_all_reservations()\n for reservation in reservations:\n for instance in reservation.instances:\n 
region_instances.append(instance)\n except boto.exception.EC2ResponseError:\n return []\n return region_instances", "def get_instance_list(\n client,\n prefix: str\n):\n l = set()\n page = client.list_objects_v2(\n Bucket=bucket, Prefix=prefix, MaxKeys=page_size\n )\n l |= {r['Key'] for r in page['Contents'] if r['Key'][-4:] == 'json'}\n\n while page['IsTruncated']:\n page = client.list_objects_v2(\n Bucket=bucket,\n Prefix=prefix,\n MaxKeys=page_size,\n ContinuationToken=page['NextContinuationToken']\n )\n l |= {r['Key'] for r in page['Contents'] if r['Key'][-4:] == 'json'}\n return l", "def getRunningInstances(self, instanceType='Agents', runitStatus='Run'):\n res = self.sysAdminClient.getOverallStatus()\n if not res[\"OK\"]:\n self.logError(\"Failure to get %s from system administrator client\" % instanceType, res[\"Message\"])\n return res\n\n val = res['Value'][instanceType]\n runningAgents = defaultdict(dict)\n for system, agents in val.iteritems():\n for agentName, agentInfo in agents.iteritems():\n if agentInfo['Setup'] and agentInfo['Installed']:\n if runitStatus != 'All' and agentInfo['RunitStatus'] != runitStatus:\n continue\n confPath = cfgPath('/Systems/' + system + '/' + self.setup + '/%s/' % instanceType + agentName)\n for option, default in (('PollingTime', HOUR), ('Port', None)):\n optPath = os.path.join(confPath, option)\n runningAgents[agentName][option] = gConfig.getValue(optPath, default)\n runningAgents[agentName][\"LogFileLocation\"] = \\\n os.path.join(self.diracLocation, 'runit', system, agentName, 'log', 'current')\n runningAgents[agentName][\"PID\"] = agentInfo[\"PID\"]\n runningAgents[agentName]['Module'] = agentInfo['Module']\n runningAgents[agentName]['RunitStatus'] = agentInfo['RunitStatus']\n runningAgents[agentName]['System'] = system\n\n return S_OK(runningAgents)", "async def describe_dbinstances_overview_async(\n self,\n request: dds_20151201_models.DescribeDBInstancesOverviewRequest,\n ) -> dds_20151201_models.DescribeDBInstancesOverviewResponse:\n runtime = util_models.RuntimeOptions()\n return await self.describe_dbinstances_overview_with_options_async(request, runtime)", "def instance_types(self) -> Sequence[str]:\n return pulumi.get(self, \"instance_types\")", "def running_instances(self, args):\n message = MessageClass()\n region = args[\"Region\"]\n\n # Boto3 resource creation by providing the access_id and access_secret\n ec2 = boto3.resource(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n instances = ec2.instances.filter(\n Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])\n for instance in instances:\n attachment = MessageAttachmentsClass()\n attachment.title = instance.id\n message.attach(attachment)\n\n button = MessageButtonsClass()\n button.text = \"Stop Instance\"\n button.value = \"Stop Instance\"\n button.name = \"Stop Instance\"\n button.command = {\"service_application\": self.yellowant_integration_id,\n \"function_name\": \"stop-instance\",\n \"data\": {\"Instance-ID\": instance.id, \"Region\": region}}\n attachment.attach_button(button)\n\n message.message_text = \"Instances Running are:\"\n return message.to_json()", "def getInstancesD(region):\n instances = getInstances(region)\n instancesDicts = {\"id\": i.id,\n \"KEEP-tag\": getKeepTag(i),\n \"instance_type\": i.instance_type,\n \"state\": i.state,\n \"launch_time\": 
i.launch_time,\n \"security_groups\": getGroups(i),\n \"region\": i.region.name,\n \"PROD\": isProduction(i)\n }", "def share_replicas_get_all(context, with_share_data=False,\n with_share_server=True, session=None):\n session = session or get_session()\n\n result = _share_replica_get_with_filters(\n context, with_share_server=with_share_server, session=session).all()\n\n if with_share_data:\n result = _set_instances_share_data(context, result, session)\n\n return result", "def get_instances_in_instance_group(\n self, name, zone, max_results=None, page_token=None):\n params = {}\n if max_results:\n params['maxResults'] = max_results\n if page_token:\n params['pageToken'] = page_token\n return self.call_api(\n '/zones/%s/instanceGroups/%s/listInstances' % (zone, name),\n method='POST',\n params=params,\n )", "def xtest_instance_api_negative(self):\n\n # Test creating a db instance.\n LOG.info(\"* Creating db instance\")\n body = r\"\"\"\n {\"instance\": {\n \"name\": \"dbapi_test\",\n \"flavorRef\": \"medium\",\n \"port\": \"3306\",\n \"dbtype\": {\n \"name\": \"mysql\",\n \"version\": \"5.5\"\n }\n }\n }\"\"\"\n\n req = httplib2.Http(\".cache\")\n resp, content = req.request(API_URL + \"instances\", \"POST\", body, AUTH_HEADER)\n LOG.debug(content)\n content = json.loads(content)\n LOG.debug(resp)\n LOG.debug(content)\n\n self.instance_id = content['instance']['id']\n LOG.debug(\"Instance ID: %s\" % self.instance_id)\n\n # Assert 1) that the request was accepted and 2) that the response\n # is in the expected format.\n self.assertEqual(201, resp.status)\n self.assertTrue(content.has_key('instance'))\n\n\n # Test creating an instance without a body in the request.\n LOG.info(\"* Creating an instance without a body\")\n resp, content = req.request(API_URL + \"instances\", \"POST\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n\n # Test creating an instance with a malformed body.\n LOG.info(\"* Creating an instance with a malformed body\")\n resp, content = req.request(API_URL + \"instances\", \"POST\", r\"\"\"{\"instance\": {}}\"\"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(500, resp.status)\n \n # Test listing all db instances with a body in the request.\n LOG.info(\"* Listing all db instances with a body\")\n resp, content = req.request(API_URL + \"instances\", \"GET\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n \n # Test getting a specific db instance with a body in the request.\n LOG.info(\"* Getting instance %s with a body in the request\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id, \"GET\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n\n # Test getting a non-existent db instance.\n LOG.info(\"* Getting dummy instance\")\n resp, content = req.request(API_URL + \"instances/dummy\", \"GET\", \"\", AUTH_HEADER)\n content = json.loads(content)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n\n # Test immediately resetting the password on a db instance with a body in the request.\n LOG.info(\"* Resetting password on instance %s with a body in the request\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id + \"/resetpassword\", \"POST\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n \n\n # Test resetting the password on a db 
instance for a non-existent instance\n LOG.info(\"* Resetting password on dummy instance\")\n resp, content = req.request(API_URL + \"instances/dummy/resetpassword\", \"POST\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n \n # Test restarting a db instance for a non-existent instance\n LOG.info(\"* Restarting dummy instance\")\n resp, content = req.request(API_URL + \"instances/dummy/restart\", \"POST\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n\n # Test immediately restarting a db instance with a body in the request.\n LOG.info(\"* Restarting instance %s\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id + \"/restart\", \"POST\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n\n # Test deleting an instance with a body in the request.\n LOG.info(\"* Testing delete of instance %s with a body in the request\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id, \"DELETE\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n # Test that trying to delete an already deleted instance returns\n # the proper error code.\n LOG.info(\"* Testing re-delete of instance %s\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id, \"DELETE\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)", "def do_show(self, *args):\n args = [ele for ele in args[0].split(' ')]\n if args[0] == '':\n print(\"** class name missing **\")\n return\n if args[0] not in self.list_classes:\n print(\"** class doesn't exist **\")\n return\n if len(args) != 2:\n print(\"** instance id missing **\")\n return\n\n storage.reload()\n dict_objs = storage.all()\n if dict_objs is None or dict_objs == []:\n print(\"** no instance found **\")\n return\n\n key = \"{}.{}\".format(args[0], args[1])\n if key in dict_objs.keys():\n print(dict_objs[key])\n else:\n print(\"** no instance found **\")", "def get_fully_solved_instances(self, db):\n numInstances = db.session.query(db.Instance).options(joinedload_all('properties')) \\\n .filter(db.Instance.experiments.contains(self)).distinct().count()\n if numInstances == 0: return 0\n num_jobs_per_instance = db.session.query(db.ExperimentResult) \\\n .filter_by(experiment=self).count() / numInstances\n instances = []\n for i in self.instances:\n if db.session.query(db.ExperimentResult) \\\n .filter(db.ExperimentResult.resultCode.like('1%')) \\\n .filter_by(experiment=self, instance=i, status=1) \\\n .count() == num_jobs_per_instance:\n instances.append(i)\n return instances", "def Get_Running_Instances():\n ec2 = boto3.resource('ec2') \n #call the features resource from the boto3 library\n instances = ec2.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': ['pending', 'running',]},])\n #filter the instances returned using the state name\n #you can also filter using Tags by adding the filters: \n #[{'Name': 'tag-key', 'Values': ['Role','Name',]}, {'Name': 'tag-value', 'Values': ['*test*', '*TEST*',]},]\n return [instance.id for instance in instances]\n #return a liste with the ids of the instances", "def list_images(self):\n \n logging.debug(\"list_images entered for %s\" % self.machine_name) \n snapshots = cs.list_snapshots()\n res = []\n server_id = self.cloudserver.id\n # find the one for this server\n for 
snapshot in snapshots:\n img = snapshot.metadata.get(\"instance_uuid\", None)\n # print img\n\n if img == server_id:\n print \"Server %s has snapshot %s\" % (server_id, img)\n res.append(img)\n\n return res", "def instances(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"instances\")", "def instances(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"instances\")", "def _get_running_ec2_instances(theargs):\n mapstr = ''\n if theargs.profile is not None:\n boto3.setup_default_session(profile_name=theargs.profile)\n ec2 = boto3.client('ec2', region_name='us-west-2')\n\n response = ec2.describe_regions()\n for region in response['Regions']:\n rname = region['RegionName']\n sys.stdout.write('Running ec2 query in region: ' + rname + '\\n')\n ec2 = boto3.client('ec2', region_name=rname)\n mapstr += 'Region: ' + rname + '\\n'\n respy = ec2.describe_instances()\n for reso in respy['Reservations']:\n for entry in reso['Instances']:\n namey = ''\n try:\n for keyval in entry['Tags']:\n if keyval['Key'] == 'Name':\n namey = keyval['Value']\n break\n except KeyError:\n pass\n\n mapstr += ('\\t\\t' + entry['PublicDnsName'] + '\\n' +\n '\\t\\tLaunch Date: ' + str(entry['LaunchTime']) +\n '\\n' + \n '\\t\\tId: ' + entry['InstanceId'] + '\\n' +\n '\\t\\tType: ' + entry['InstanceType'] + '\\n' +\n '\\t\\tName: ' + namey + '\\n' +\n '\\t\\tState: ' + entry['State']['Name'] + '\\n\\n')\n sys.stdout.write('\\nResults:\\n\\n')\n return mapstr", "def get_instance_classes():\n return Base_Instance.get_instance_classes()", "def instances(self, endpoint_name=None):\n if endpoint_name is None:\n _, body = self.request('/v1.1/endpoint/instances', 'GET')\n else:\n _, body = self.request('/v1.1/endpoints/%s/instances' % endpoint_name, 'GET')\n return body", "def List(self, zone):\n project = properties.VALUES.core.project.Get(required=True)\n request = self.messages.ComputeInstancesListRequest(\n zone=zone, project=project)\n instances = list_pager.YieldFromList(\n service=self.client.instances,\n request=request,\n method='List',\n field='items')\n\n result_set = []\n for instance in instances:\n if self._VMCreatedByExecGroup(instance):\n result_set.append(instance)\n\n return result_set", "async def describe_dbinstances_with_options_async(\n self,\n request: dds_20151201_models.DescribeDBInstancesRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstancesResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.charge_type):\n query['ChargeType'] = request.charge_type\n if not UtilClient.is_unset(request.connection_domain):\n query['ConnectionDomain'] = request.connection_domain\n if not UtilClient.is_unset(request.dbinstance_class):\n query['DBInstanceClass'] = request.dbinstance_class\n if not UtilClient.is_unset(request.dbinstance_description):\n query['DBInstanceDescription'] = request.dbinstance_description\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.dbinstance_status):\n query['DBInstanceStatus'] = request.dbinstance_status\n if not UtilClient.is_unset(request.dbinstance_type):\n query['DBInstanceType'] = request.dbinstance_type\n if not UtilClient.is_unset(request.dbnode_type):\n query['DBNodeType'] = request.dbnode_type\n if not UtilClient.is_unset(request.engine):\n query['Engine'] = request.engine\n if not UtilClient.is_unset(request.engine_version):\n 
query['EngineVersion'] = request.engine_version\n if not UtilClient.is_unset(request.expire_time):\n query['ExpireTime'] = request.expire_time\n if not UtilClient.is_unset(request.expired):\n query['Expired'] = request.expired\n if not UtilClient.is_unset(request.network_type):\n query['NetworkType'] = request.network_type\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.page_number):\n query['PageNumber'] = request.page_number\n if not UtilClient.is_unset(request.page_size):\n query['PageSize'] = request.page_size\n if not UtilClient.is_unset(request.region_id):\n query['RegionId'] = request.region_id\n if not UtilClient.is_unset(request.replication_factor):\n query['ReplicationFactor'] = request.replication_factor\n if not UtilClient.is_unset(request.resource_group_id):\n query['ResourceGroupId'] = request.resource_group_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.tag):\n query['Tag'] = request.tag\n if not UtilClient.is_unset(request.v_switch_id):\n query['VSwitchId'] = request.v_switch_id\n if not UtilClient.is_unset(request.vpc_id):\n query['VpcId'] = request.vpc_id\n if not UtilClient.is_unset(request.zone_id):\n query['ZoneId'] = request.zone_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstances',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstancesResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def test_instance_api(self):\n\n # Test creating a db instance.\n # ----------------------------\n LOG.info(\"* Creating db instance\")\n body = r\"\"\"\n {\"instance\": {\n \"name\": \"%s\",\n \"flavorRef\": \"103\",\n \"port\": \"3306\",\n \"dbtype\": {\n \"name\": \"mysql\",\n \"version\": \"5.5\"\n }\n }\n }\"\"\" % INSTANCE_NAME\n\n client = httplib2.Http(\".cache\", timeout=TIMEOUTS['http'], disable_ssl_certificate_validation=True)\n resp, content = self._execute_request(client, \"instances\", \"POST\", body)\n\n # Assert 1) that the request was accepted and 2) that the response\n # is in the expected format.\n self.assertEqual(201, resp.status, (\"Expecting 201 as response status of create instance but received %s\" % resp.status))\n content = self._load_json(content,'Create Instance')\n self.assertTrue(content.has_key('instance'), \"Response body of create instance does not have 'instance' field\")\n\n credential = content['instance']['credential']\n\n self.instance_id = content['instance']['id']\n LOG.debug(\"Instance ID: %s\" % self.instance_id)\n\n\n # Test listing all db instances.\n # ------------------------------\n LOG.info(\"* Listing all db instances\")\n resp, content = self._execute_request(client, \"instances\", \"GET\", \"\")\n \n # Assert 1) that the request was accepted and 2) that the response is\n # in the expected format (e.g. 
a JSON object beginning with an\n # 'instances' key).\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of list instance but received %s\" % resp.status))\n content = self._load_json(content,'List all Instances')\n self.assertTrue(content.has_key('instances'), \"Response body of list instances does not contain 'instances' field.\")\n\n\n # Test getting a specific db instance.\n # ------------------------------------\n LOG.info(\"* Getting instance %s\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances/\" + self.instance_id, \"GET\", \"\")\n \n # Assert 1) that the request was accepted and 2) that the returned\n # instance is the same as the accepted instance.\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of show instance but received %s\" % resp.status))\n content = self._load_json(content,'Get Single Instance')\n self.assertEqual(self.instance_id, str(content['instance']['id']), \"Instance ID not found in Show Instance response\")\n\n\n # Check to see if the instance we previously created is \n # in the 'running' state\n # -----------------------------------------------------\n wait_so_far = 0\n status = content['instance']['status']\n pub_ip = content['instance']['hostname']\n while status != 'running' or pub_ip is None or len(pub_ip) <= 0:\n # wait a max of max_wait for instance status to show running\n time.sleep(POLL_INTERVALS['boot'])\n wait_so_far += POLL_INTERVALS['boot']\n if wait_so_far >= TIMEOUTS['boot']:\n break\n \n resp, content = self._execute_request(client, \"instances/\" + self.instance_id, \"GET\", \"\")\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of show instance but received %s\" % resp.status))\n content = self._load_json(content,'Get Single Instance')\n status = content['instance']['status']\n pub_ip = content['instance']['hostname']\n\n if status != 'running':\n\n self.fail(\"for some reason the instance did not switch to 'running' in %s\" % TIMEOUT_STR)\n else:\n # try to connect to mysql instance\n pub_ip = content['instance']['hostname']\n # user/pass = credentials\n db_user = credential['username']\n db_passwd = credential['password']\n db_name = 'mysql'\n\n LOG.info(\"* Trying to connect to mysql DB on first boot: %s, %s, %s\" %(db_user, db_passwd, pub_ip))\n conn = self.db_connect(db_user, db_passwd, pub_ip, db_name)\n if conn is None:\n self.fail(\"* maximum trials reached, db connection failed on first boot over %s: \" % pub_ip)\n conn.close()\n\n\n\n # Test resetting the password on a db instance.\n # ---------------------------------------------\n LOG.info(\"* Resetting password on instance %s\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances/\" + self.instance_id +\"/resetpassword\", \"POST\", \"\")\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of reset password but received %s\" % resp.status))\n content = self._load_json(content,'Get new password')\n\n if resp.status == 200 :\n db_new_passwd = content['password']\n LOG.info(\"* Trying to connect to mysql DB after resetting password: %s, %s, %s\" %(db_user, db_new_passwd, pub_ip))\n conn = self.db_connect(db_user, db_new_passwd, pub_ip, db_name)\n if conn is None:\n LOG.exception(\"* something is wrong with mysql connection after resetting password\")\n conn.close()\n LOG.info(\"* Maybe the old password still works ?\")\n conn_2 = self.db_connect(db_user, db_passwd, pub_ip, db_name)\n if conn_2 is None:\n LOG.exception(\"* no, old 
password does not work anymore\")\n else:\n LOG.info(\"* old password still works, new password has not kicked in\")\n conn_2.close()\n self.fail(\"* maximum trials reached, db connection failed after resetting password over %s: \" % pub_ip)\n\n\n # XXX: Suspect restarting too soon after a \"reset password\" command is putting the instance in a bad mood on restart\n time.sleep(DELAYS['between_reset_and_restart'])\n\n # Test restarting a db instance.\n # ------------------------------\n LOG.info(\"* Restarting instance %s\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances/\" + self.instance_id +\"/restart\", \"POST\", \"\")\n self.assertEqual(204, resp.status, (\"Expecting 204 as response status of restart instance but received %s\" % resp.status))\n\n # Test getting a specific db instance.\n LOG.info(\"* Getting instance %s\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances/\" + self.instance_id , \"GET\", \"\")\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of show instance but received %s\" % resp.status))\n content = self._load_json(content,'Get Single Instance after Restart')\n \n wait_so_far = 0\n status = content['instance']['status']\n while status != 'running':\n # wait a max of max_wait for instance status to show running\n time.sleep(POLL_INTERVALS['boot'])\n wait_so_far += POLL_INTERVALS['boot']\n if wait_so_far >= TIMEOUTS['boot']:\n break\n \n resp, content = self._execute_request(client, \"instances/\" + self.instance_id , \"GET\", \"\")\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of show instance but received %s\" % resp.status))\n content = self._load_json(content,'Get Single Instance')\n status = content['instance']['status']\n\n if status != 'running':\n self.fail(\"Instance %s did not go to running after a reboot and waiting %s\" % (self.instance_id, TIMEOUT_STR))\n else:\n # try to connect to mysql instance\n time.sleep(DELAYS['between_reboot_and_connect'])\n LOG.info(\"* Trying to connect to mysql DB after rebooting the instance: %s, %s, %s\" %(db_user, db_new_passwd, pub_ip))\n\n conn = self.db_connect(db_user, db_new_passwd, pub_ip, db_name)\n if conn is None:\n self.fail(\"* maximum trials reached, db connection failed after rebooting instance over %s: \" % pub_ip)\n conn.close()\n\n # Test deleting a db instance.\n # ----------------------------\n LOG.info(\"* Deleting instance %s\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances/\" + self.instance_id , \"DELETE\", \"\")\n\n # Assert 1) that the request was accepted and 2) that the instance has\n # been deleted.\n self.assertEqual(204, resp.status, \"Response status of instance delete did not return 204\")\n\n LOG.debug(\"Verifying that instance %s has been deleted\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances\", \"GET\", \"\")\n \n if not content:\n pass\n else:\n content = json.loads(content)\n for each in content['instances']:\n self.assertFalse(each['id'] == self.instance_id, (\"Instance %s did not actually get deleted\" % self.instance_id))\n\n LOG.debug(\"Sleeping...\")\n time.sleep(DELAYS['after_delete'])", "def get_solved_instances(self, db):\n instance_ids = [i[0] for i in db.session.query(db.ExperimentResult.Instances_idInstance) \\\n .filter_by(experiment=self).filter(db.ExperimentResult.resultCode.like('1%')) \\\n .filter_by(status=1).distinct().all()]\n return 
db.session.query(db.Instance).filter(db.Instance.idInstance.in_(instance_ids)).all()" ]
[ "0.671852", "0.66249394", "0.6588612", "0.6422866", "0.63801897", "0.63176703", "0.6232645", "0.6174557", "0.61549586", "0.60962325", "0.609501", "0.6075133", "0.60368335", "0.6020968", "0.588904", "0.58728695", "0.5816922", "0.5762505", "0.57482815", "0.5741197", "0.5740232", "0.5729422", "0.57188135", "0.5703127", "0.5700055", "0.5691072", "0.5681384", "0.5678996", "0.56739885", "0.56680644", "0.56621283", "0.5661296", "0.5652749", "0.56473154", "0.562115", "0.5612491", "0.5587096", "0.5583506", "0.5575814", "0.5558656", "0.5549993", "0.55440927", "0.5489638", "0.54750806", "0.5466568", "0.5465829", "0.5425087", "0.5421519", "0.54086393", "0.5404777", "0.5400476", "0.5391107", "0.538625", "0.5381959", "0.53768504", "0.5374085", "0.5356266", "0.533069", "0.5328725", "0.52996844", "0.52891374", "0.5269341", "0.5235138", "0.5234785", "0.5232186", "0.52301174", "0.5225091", "0.5212418", "0.52091646", "0.5205446", "0.5196384", "0.5195692", "0.5194051", "0.51843953", "0.516599", "0.51555365", "0.51466244", "0.5143036", "0.51370186", "0.51229084", "0.5120765", "0.5114662", "0.511296", "0.5079378", "0.5070808", "0.50643075", "0.50572944", "0.5056894", "0.50562674", "0.504948", "0.50478494", "0.50366277", "0.50366277", "0.50348043", "0.5033294", "0.50299793", "0.5028394", "0.5020169", "0.5019296", "0.50183576" ]
0.5411473
48
The list of replica set and standalone instances is displayed when the DBInstanceType parameter uses the default value replicate. To query the list of sharded cluster instances, you must set the DBInstanceType parameter to sharding.
async def describe_dbinstances_with_options_async( self, request: dds_20151201_models.DescribeDBInstancesRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.DescribeDBInstancesResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.charge_type): query['ChargeType'] = request.charge_type if not UtilClient.is_unset(request.connection_domain): query['ConnectionDomain'] = request.connection_domain if not UtilClient.is_unset(request.dbinstance_class): query['DBInstanceClass'] = request.dbinstance_class if not UtilClient.is_unset(request.dbinstance_description): query['DBInstanceDescription'] = request.dbinstance_description if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.dbinstance_status): query['DBInstanceStatus'] = request.dbinstance_status if not UtilClient.is_unset(request.dbinstance_type): query['DBInstanceType'] = request.dbinstance_type if not UtilClient.is_unset(request.dbnode_type): query['DBNodeType'] = request.dbnode_type if not UtilClient.is_unset(request.engine): query['Engine'] = request.engine if not UtilClient.is_unset(request.engine_version): query['EngineVersion'] = request.engine_version if not UtilClient.is_unset(request.expire_time): query['ExpireTime'] = request.expire_time if not UtilClient.is_unset(request.expired): query['Expired'] = request.expired if not UtilClient.is_unset(request.network_type): query['NetworkType'] = request.network_type if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.page_number): query['PageNumber'] = request.page_number if not UtilClient.is_unset(request.page_size): query['PageSize'] = request.page_size if not UtilClient.is_unset(request.region_id): query['RegionId'] = request.region_id if not UtilClient.is_unset(request.replication_factor): query['ReplicationFactor'] = request.replication_factor if not UtilClient.is_unset(request.resource_group_id): query['ResourceGroupId'] = request.resource_group_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token if not UtilClient.is_unset(request.tag): query['Tag'] = request.tag if not UtilClient.is_unset(request.v_switch_id): query['VSwitchId'] = request.v_switch_id if not UtilClient.is_unset(request.vpc_id): query['VpcId'] = request.vpc_id if not UtilClient.is_unset(request.zone_id): query['ZoneId'] = request.zone_id req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='DescribeDBInstances', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.DescribeDBInstancesResponse(), await self.call_api_async(params, req, runtime) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_instances(cls, args, config):\n instance_list = config.get_all_instances()\n if len(instance_list) > 0:\n table_data = []\n for i in instance_list:\n provider_obj = config.get_object_by_id(i.provider_id, 'Provider')\n if provider_obj is None:\n continue\n provider_name = provider_obj.name\n #print \"provider_obj.type\",provider_obj.type\n if i.worker_group_id is not None:\n name = config.get_object_by_id(i.worker_id, 'WorkerGroup').name\n itype = 'worker'\n else:\n name = config.get_object_by_id(i.controller_id, 'Controller').name\n itype = 'controller'\n table_data.append([i.id, provider_name, i.provider_instance_identifier, itype, name])\n table_print(['ID', 'provider', 'instance id', 'type', 'name'], table_data)\n else:\n print \"No instance found\"", "def getinstancelist():\n dbcursor_dict.execute(dbq.get_all_instance_list, )\n db_instance_list = dbcursor_dict.fetchall()\n return db_instance_list", "def list_(args):\n\n # Get Config.py\n cloud = get_current_cloud(args.cloud)\n\n instances = cloud.list_instances()\n print_table(print_instance_summary, headers, instances,\n use_color=args.color)\n return instances", "def list_instances(self):\n # list instances\n self._list_instances()", "def list_instances(self):\n print '# AWS EC2 instances'\n self.compute.list_instances()", "def list_instances():\n if request.method == \"GET\":\n return render_template(\"instances.html\")", "def show_instances():\n return get_instances()", "def list_instances(self):\n LOG.debug(\"list_instances\")\n\n instance_ids = []\n bmms = db.bmm_get_all(None)\n for bmm in bmms:\n if not bmm[\"instance_id\"]:\n continue\n instance_ids.append(self._instance_id_to_name(bmm[\"instance_id\"]))\n\n return instance_ids", "def list_instances(self):\n\n response = self.client.service.instances().aggregatedList(\n project=self.client.project_id).execute()\n\n zones = response.get('items', {})\n instances = []\n for zone in zones.values():\n for instance in zone.get('instances', []):\n instances.append(instance)\n\n return instances", "def list_instances():\n print(str_sep)\n print(\"Listing EC2 instances from [{}] region.\".format(SESSION.region_name))\n print(\"{:20s}{:15s}{:10s}{}\".format(\"ID\", \"TYPE\", \"STATE\", \"NAME\"))\n print(str_sep)\n\n try:\n for instance in EC2_MANAGER.list_instances():\n # get the instance name in the tags list\n name = next((item for item in instance.tags if item[\"Key\"] == \"Name\"),\n {'Key': 'Name', 'Value': 'None'})\n\n print(\"{:20s}{:15s}{:10s}{}\".format(instance.id,\n instance.instance_type,\n instance.state['Name'],\n name['Value']))\n except ClientError as e:\n ErrManager.err_manager(e)\n\n print(str_sep)", "def get_dbservers(self):\n ret = []\n for i in self.all_instances:\n if i.is_dbserver():\n ret.append(i)\n return ret", "def describe_dbinstances(\n self,\n request: dds_20151201_models.DescribeDBInstancesRequest,\n ) -> dds_20151201_models.DescribeDBInstancesResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_dbinstances_with_options(request, runtime)", "def list(self, instance, limit=None, marker=None):\n return self._list(\"/instances/%s/databases\" % base.getid(instance),\n \"databases\", limit, marker)", "def get_instance_list():\n return parse_list_output(Popen('nova list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])", "def get_instances() -> dict:\n url = f\"{app.config['COMPUTE_SERVERS_REF']}/detail\"\n instances_rq = request(\n method=\"GET\", url=url, headers=build_header(), params={\"vm_state\": 
\"active\"},\n )\n\n if not instances_rq.ok:\n HTTPError(instances_rq.status_code)\n\n answer = {\"servers\": list()}\n for instance in instances_rq.json()[\"servers\"]:\n instance_info = dict(name=instance[\"name\"])\n instance_info[\"ip_addresses\"] = list()\n for network, info in instance[\"addresses\"].items():\n instance_info[\"ip_addresses\"].extend(entry[\"addr\"] for entry in info)\n answer[\"servers\"].append(instance_info)\n\n return answer", "def list_aws_instances(verbose=False, state='all'):\n conn = get_ec2_connection()\n\n reservations = conn.get_all_reservations()\n instances = []\n for res in reservations:\n for instance in res.instances:\n if state == 'all' or instance.state == state:\n instance = {\n 'id': instance.id,\n 'type': instance.instance_type,\n 'image': instance.image_id,\n 'state': instance.state,\n 'instance': instance,\n }\n instances.append(instance)\n env.instances = instances\n if verbose:\n import pprint\n pprint.pprint(env.instances)", "def list_instances():\n js = _get_jetstream_conn()\n il = js.compute.instances.list()\n if not il:\n msg = \"You don't have any instances available.\"\n else:\n msg = (\"You have {0} instances available. Here are up to 3 most \"\n \"recent: \".format(len(il)))\n msg_ex = \"\"\n content = \"\"\n for i in il[:3]:\n msg_ex += \"{0},\".format(i.name)\n content += \"{0} ({1})\\n\".format(\n i.name, i.public_ips[0] if i.public_ips else i.private_ips[0])\n return statement(msg + msg_ex).simple_card(title=msg, content=content)", "def describe_dbinstances_overview(\n self,\n request: dds_20151201_models.DescribeDBInstancesOverviewRequest,\n ) -> dds_20151201_models.DescribeDBInstancesOverviewResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_dbinstances_overview_with_options(request, runtime)", "def list_running_instances(self):\n print '# Running AWS EC2 instances'\n self.compute.list_running_instances()", "def instances(self, **query):\n return self._list(_instance.Instance, **query)", "def list_databases(self, instance, limit=None, marker=None):\n return instance.list_databases(limit=limit, marker=marker)", "def describe_rds_instances(rds, account, region, output_bucket):\n rds_list = rds.describe_db_instances().get('DBInstances')\n\n for rds_obj in rds_list:\n #print rds_obj\n output_bucket.append(misc.format_line((\n misc.check_if(account.get('name')),\n misc.check_if(region.get('RegionName')),\n misc.check_if(rds_obj.get('DBSubnetGroup').get('VpcId')),\n misc.check_if(rds_obj.get('DBInstanceIdentifier')),\n misc.check_if(rds_obj.get('DBInstanceClass')),\n misc.check_if(str(rds_obj.get('PubliclyAccessible'))),\n misc.check_if(rds_obj.get('Endpoint').get('Address')),\n misc.lookup(rds_obj.get('Endpoint').get('Address')),\n misc.check_if(str(rds_obj.get('Endpoint').get('Port')))\n )))", "def show_all_instances(self):\n if not self.all_instances:\n logging.error(\"%s: no instances detected\", self.name)\n return\n instances = \"\"\n for instance in self.all_instances:\n instances += \" - {0.name} (pid: {0.pid})\".format(instance)\n logging.info(\"arangod instances for starter: %s - %s\", self.name, instances)", "def instance_list(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"instance_list\")", "def list_instances(self):\n instances = []\n try:\n pages = self.compute.virtual_machines.list(\n CONF.azure.resource_group)\n except Exception as e:\n msg = six.text_type(e)\n LOG.exception(msg)\n ex = exception.InstanceListFailure(reason=six.text_type(e))\n raise ex\n else:\n if pages:\n for i 
in pages:\n instances.append(i.name)\n return instances", "def list_ebss_by_instance():\n\n ec2 = u.create_ec2_resource()\n instances = [(u.seconds_from_datetime(i.launch_time), i) for i in ec2.instances.all()]\n sorted_instances = sorted(instances, key=itemgetter(0))\n\n for (seconds, instance) in sorted_instances:\n\n volumes = instance.volumes.all()\n volume_strs = []\n for v in volumes:\n volume_strs.append(\"%s (%s)\"%(v.id, v.size))\n print(\"%s: %s\" % (u.get_name(instance.tags), ','.join(volume_strs)))", "def list_instance_uuids(self):\n return self.list_instances()", "def index(self, req, instance_id):\n LOG.info(\"Call to Databases index - %s\", instance_id)\n LOG.debug(\"%s - %s\", req.environ, req.body)\n local_id = dbapi.localid_from_uuid(instance_id)\n ctxt = req.environ['nova.context']\n common.instance_available(ctxt, instance_id, local_id, self.compute_api)\n try:\n result = self.guest_api.list_databases(ctxt, local_id)\n except Exception as err:\n LOG.error(err)\n raise exception.InstanceFault(\"Unable to get the list of databases\")\n LOG.debug(\"LIST DATABASES RESULT - %s\", str(result))\n databases = {'databases':[]}\n for database in result:\n mysql_database = models.MySQLDatabase()\n mysql_database.deserialize(database)\n databases['databases'].append({'name': mysql_database.name})\n LOG.debug(\"LIST DATABASES RETURN - %s\", databases)\n return databases", "def _getAllRunningInstances(self):\n return self._ec2.get_only_instances(filters={\n 'tag:leader_instance_id': self._instanceId,\n 'instance-state-name': 'running'})", "def list_as_instances(access_key, secret_key, region, autoscaling_group):\n\n aws_as = init_aws_as_conn(access_key, secret_key, region)\n aws_ec2 = init_aws_ec2_conn(access_key, secret_key, region)\n autoscaling_instances = []\n\n vm = aws_as.get_all_groups([autoscaling_group])\n autoscaling_instances_id = [j.instance_id for i in vm for j in i.instances]\n\n for instance_id in autoscaling_instances_id:\n vm = boto.ec2.instance.Instance(aws_ec2)\n vm.id = instance_id\n vm.update()\n autoscaling_instances.append(vm)\n\n return autoscaling_instances", "def databases(request): # pylint: disable=unused-argument\n response = InstancesAccess.get_all()\n # By default, JsonResponse refuse to serialize a list to a Json list. safe=False allow it.\n return JsonResponse(response, safe=False)", "def query_db_cluster(instance_id):\n try:\n response = RDS.describe_db_instances(\n DBInstanceIdentifier=instance_id\n )\n return response['DBInstances'][0]['DBClusterIdentifier']\n except KeyError:\n db_subnet = response['DBInstances'][0]['DBSubnetGroup']['DBSubnetGroupName']\n return [False, db_subnet]", "def databases(self, instance, **query):\n instance = self._get_resource(_instance.Instance, instance)\n return self._list(_database.Database, instance_id=instance.id, **query)", "def list_instances_detail(self):\n\n # TODO(imsplitbit): need to ask around if this is the best way to do\n # this. This causes some redundant vzlist commands as get_info is run\n # on every item returned from this command but it didn't make sense\n # to re-implement get_info as get_info_all.\n infos = []\n try:\n # get a list of CT names which will be nova friendly.\n # NOTE: This can be an issue if nova decides to change\n # the format of names. 
We would need to have a migration process\n # to change the names in the name field of the CTs.\n out, err = utils.execute('sudo', 'vzlist', '--all', '-o',\n 'name', '-H')\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Problem listing Vzs')\n\n for name in out.splitlines():\n name = name.split()[0]\n status = self.get_info(name)\n infos.append(driver.InstanceInfo(name, status['state']))\n\n return infos", "def instances(self):\n return self.get('instances')", "def list_instances(self):\n nodes = self._driver.list_nodes()\n return [[n.name, n.state, n.public_ips] for n in nodes]", "def do_instance_show(cs, args):\n try:\n instance = cs.instances.detail(args.instance)\n except exceptions.NotFound as e:\n msg = \"No server with an id of '%s' exists\" % args.instance\n e.message = msg\n raise\n\n _print_server_details(instance)", "def instances(self):\r\n # It would be more efficient to do this with filters now\r\n # but not all services that implement EC2 API support filters.\r\n instances = []\r\n rs = self.connection.get_all_instances()\r\n for reservation in rs:\r\n uses_group = [g.name for g in reservation.groups if g.name == self.name]\r\n if uses_group:\r\n instances.extend(reservation.instances)\r\n return instances", "def GetInstances(self, bulk=False, reason=None):\n query = []\n _AppendIf(query, bulk, (\"bulk\", 1))\n _AppendReason(query, reason)\n\n instances = self._SendRequest(HTTP_GET,\n \"/%s/instances\" % GANETI_RAPI_VERSION,\n query, None)\n if bulk:\n return instances\n else:\n return [i[\"id\"] for i in instances]", "def describe_rds_db_instances(StackId=None, RdsDbInstanceArns=None):\n pass", "def listInstancesRegionZone(region,zone):\n\tprint \"-\"*80\n\tprint \"# Region :\",region,\" Zone\", zone\t\n\tprint \"-\"*80\n\tinstances = getInstancesRegionZone(region,zone)\n\tif instances:\n\t\tfor instance in instances:\n\t\t\tprint \"[\",instance.ami_launch_index,\"]\",instance.ip_address,\" (\",instance.private_ip_address,\") \",instance.instance_type,\" key=\",instance.key_name", "def describe_instances():\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Describe instances\n instances = ec2_resource.instances.all()\n for instance in instances:\n print('State of the instance \"' + instance.id + '\" is: \"' + instance.state['Name'] + '\"')\n return", "def list_instances_detail(self, context):\n LOG.debug(\"list_instances_detail\")\n\n info_list = []\n bmms = db.bmm_get_all_by_instance_id_not_null(context)\n for bmm in bmms:\n instance = db.instance_get(context, bmm[\"instance_id\"])\n status = PowerManager(bmm[\"ipmi_ip\"]).status()\n if status == \"off\":\n inst_power_state = power_state.SHUTOFF\n\n if instance[\"vm_state\"] == vm_states.ACTIVE:\n db.instance_update(context, instance[\"id\"], {\"vm_state\": vm_states.STOPPED})\n else:\n inst_power_state = power_state.RUNNING\n\n if instance[\"vm_state\"] == vm_states.STOPPED:\n db.instance_update(context, instance[\"id\"], {\"vm_state\": vm_states.ACTIVE})\n\n info_list.append(driver.InstanceInfo(self._instance_id_to_name(bmm[\"instance_id\"]), \n inst_power_state))\n\n return info_list", "def instances(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstanceStatusArgs']]]]:\n return pulumi.get(self, \"instances\")", "def do_instance_list(cs, args):\n instances = cs.instances.list()\n\n fields = [\"OCCI ID\"]\n if args.detailed:\n fields.extend([\"Name\", \"State\", \"Network\"])\n occi_attrs = 
(\"occi.compute.hostname\",\n \"occi.compute.state\")\n\n pt = prettytable.PrettyTable([f for f in fields], caching=False)\n pt.align = 'l'\n\n for instance in instances:\n row = []\n attrs = instance.get('attributes', {})\n instance_id = attrs.get('occi.core.id', None)\n row.append(instance_id)\n\n if args.detailed and instance_id:\n if not all([i in attrs for i in occi_attrs]):\n instance = cs.instances.detail(instance_id)\n attrs = instance.get('attributes', {})\n\n name = attrs.get(\"occi.core.title\", None)\n if name is None:\n name = attrs.get(\"occi.compute.hostname\", None)\n row.append(name)\n row.append(attrs.get(\"occi.compute.state\", None))\n\n links = instance.get(\"links\", [])\n network = []\n for link in links:\n if occi.CATEGORIES[\"network\"] in link[\"kind\"][\"related\"]:\n # get IPv4\n ip = link[\"attributes\"].get(\n \"occi.networkinterface.address\",\n None\n )\n if not ip:\n ip = link[\"attributes\"].get(\n \"occi.networkinterface.ip6\",\n None\n )\n network.append(ip)\n row.append(network)\n\n pt.add_row(row)\n\n print(pt.get_string())", "def get_instances(self):\n connection = self.connection\n\n instances = []\n\n connection.row_factory = sqlite3.Row\n cur = connection.cursor()\n cur.execute(\"SELECT * FROM INSTANCES\")\n rows = cur.fetchall()\n columns = [str(i[0]).lower() for i in cur.description]\n for row in rows:\n object = dict(zip(columns, row))\n instances.append(object)\n\n instancesNoneDict = {}\n\n for instance in instances:\n if instance['harvesterid'] not in instancesNoneDict:\n instancesNoneDict[instance['harvesterid']] = {}\n if instance['harvesterhost'] not in instancesNoneDict[instance['harvesterid']]:\n instancesNoneDict[instance['harvesterid']][instance['harvesterhost']] = {\n 'availability': instance['availability'], 'errorsdesc': instance['errorsdesc'],\n 'contacts': instance['contacts'].split(','),\n 'active': instance['active'], 'notificated': instance['notificated']}\n elif instance['harvesterid'] in instancesNoneDict:\n if instance['harvesterhost'] not in instancesNoneDict[instance['harvesterid']]:\n instancesNoneDict[instance['harvesterid']][instance['harvesterhost']] = {\n 'availability': instance['availability'], 'errorsdesc': instance['errorsdesc'],\n 'contacts': instance['contacts'].split(','),\n 'active': instance['active'], 'notificated': instance['notificated']}\n if 'none' in instancesNoneDict[instance['harvesterid']]:\n del instancesNoneDict[instance['harvesterid']]['none']\n return instancesNoneDict", "def list_instances(self):\n instances = utils.list_instances(self.compute_client,\n drv_conf.resource_group)\n\n self._uuid_to_omni_instance.clear()\n instance_names = []\n for instance in instances:\n openstack_id = None\n if instance.tags and 'openstack_id' in instance.tags:\n openstack_id = instance.tags['openstack_id']\n if openstack_id is None:\n openstack_id = self._get_uuid_from_omni_id(instance.name)\n self._uuid_to_omni_instance[openstack_id] = instance\n instance_names.append(instance.name)\n return instance_names", "def list_instance_name():\n\n if request.method == \"GET\":\n with lock:\n names = list(instances.keys())\n return jsonify(names)\n return Response(status=200)", "def describe_dbinstances_with_options(\n self,\n request: dds_20151201_models.DescribeDBInstancesRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstancesResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.charge_type):\n query['ChargeType'] = request.charge_type\n 
if not UtilClient.is_unset(request.connection_domain):\n query['ConnectionDomain'] = request.connection_domain\n if not UtilClient.is_unset(request.dbinstance_class):\n query['DBInstanceClass'] = request.dbinstance_class\n if not UtilClient.is_unset(request.dbinstance_description):\n query['DBInstanceDescription'] = request.dbinstance_description\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.dbinstance_status):\n query['DBInstanceStatus'] = request.dbinstance_status\n if not UtilClient.is_unset(request.dbinstance_type):\n query['DBInstanceType'] = request.dbinstance_type\n if not UtilClient.is_unset(request.dbnode_type):\n query['DBNodeType'] = request.dbnode_type\n if not UtilClient.is_unset(request.engine):\n query['Engine'] = request.engine\n if not UtilClient.is_unset(request.engine_version):\n query['EngineVersion'] = request.engine_version\n if not UtilClient.is_unset(request.expire_time):\n query['ExpireTime'] = request.expire_time\n if not UtilClient.is_unset(request.expired):\n query['Expired'] = request.expired\n if not UtilClient.is_unset(request.network_type):\n query['NetworkType'] = request.network_type\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.page_number):\n query['PageNumber'] = request.page_number\n if not UtilClient.is_unset(request.page_size):\n query['PageSize'] = request.page_size\n if not UtilClient.is_unset(request.region_id):\n query['RegionId'] = request.region_id\n if not UtilClient.is_unset(request.replication_factor):\n query['ReplicationFactor'] = request.replication_factor\n if not UtilClient.is_unset(request.resource_group_id):\n query['ResourceGroupId'] = request.resource_group_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.tag):\n query['Tag'] = request.tag\n if not UtilClient.is_unset(request.v_switch_id):\n query['VSwitchId'] = request.v_switch_id\n if not UtilClient.is_unset(request.vpc_id):\n query['VpcId'] = request.vpc_id\n if not UtilClient.is_unset(request.zone_id):\n query['ZoneId'] = request.zone_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstances',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstancesResponse(),\n self.call_api(params, req, runtime)\n )", "def list(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n print(vmrun.listSnapshots())", "def _list_backups_for_instance(self, instance, marker=0, limit=20):\n uri = \"/%s/%s/backups?limit=%d&marker=%d\" % (self.uri_base,\n utils.get_id(instance),\n int(limit),\n int(marker))\n resp, resp_body = self.api.method_get(uri)\n mgr = self.api._backup_manager\n return [CloudDatabaseBackup(mgr, backup)\n for 
backup in resp_body.get(\"backups\")]", "def get_instances(cls):\n raise NotImplementedError", "def instances(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"instances\")", "def describe_dbinstances_overview_with_options(\n self,\n request: dds_20151201_models.DescribeDBInstancesOverviewRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstancesOverviewResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.charge_type):\n query['ChargeType'] = request.charge_type\n if not UtilClient.is_unset(request.engine_version):\n query['EngineVersion'] = request.engine_version\n if not UtilClient.is_unset(request.instance_class):\n query['InstanceClass'] = request.instance_class\n if not UtilClient.is_unset(request.instance_ids):\n query['InstanceIds'] = request.instance_ids\n if not UtilClient.is_unset(request.instance_status):\n query['InstanceStatus'] = request.instance_status\n if not UtilClient.is_unset(request.instance_type):\n query['InstanceType'] = request.instance_type\n if not UtilClient.is_unset(request.network_type):\n query['NetworkType'] = request.network_type\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.region_id):\n query['RegionId'] = request.region_id\n if not UtilClient.is_unset(request.resource_group_id):\n query['ResourceGroupId'] = request.resource_group_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.v_switch_id):\n query['VSwitchId'] = request.v_switch_id\n if not UtilClient.is_unset(request.vpc_id):\n query['VpcId'] = request.vpc_id\n if not UtilClient.is_unset(request.zone_id):\n query['ZoneId'] = request.zone_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstancesOverview',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstancesOverviewResponse(),\n self.call_api(params, req, runtime)\n )", "def list_rds(region, filter_by_kwargs):\n conn = boto.rds.connect_to_region(region)\n instances = conn.get_all_dbinstances()\n return lookup(instances, filter_by=filter_by_kwargs)", "def get_instances(self):\n for server in self.cs.servers.list():\n match = self.cluster_re.match(server.name)\n if match:\n for ip in server.networks['public']:\n if ip.count('.'):\n v4ip = ip\n yield (match.group('role'), v4ip)", "def get_instance_classes():\n return Base_Instance.instance_classes", "def InstancesMultiAlloc(self, instances, reason=None, **kwargs):\n query = []\n body = {\n \"instances\": instances,\n }\n self._UpdateWithKwargs(body, **kwargs)\n\n _AppendDryRunIf(query, kwargs.get(\"dry_run\"))\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_POST,\n \"/%s/instances-multi-alloc\" % GANETI_RAPI_VERSION,\n query, body)", "def list(self, instance=None, limit=20, marker=0):\n if instance is None:\n return 
super(CloudDatabaseBackupManager, self).list()\n return self.api._manager._list_backups_for_instance(instance, limit=limit,\n marker=marker)", "def get_all_instances(self, instance_ids=None, filters=None):\r\n params = {}\r\n if instance_ids:\r\n self.build_list_params(params, instance_ids, 'InstanceId')\r\n if filters:\r\n if 'group-id' in filters:\r\n warnings.warn(\"The group-id filter now requires a security \"\r\n \"group identifier (sg-*) instead of a group \"\r\n \"name. To filter by group name use the \"\r\n \"'group-name' filter instead.\", UserWarning)\r\n self.build_filter_params(params, filters)\r\n return self.get_list('DescribeInstances', params,\r\n [('item', Reservation)], verb='POST')", "def instances(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BrokerInstanceArgs']]]]:\n return pulumi.get(self, \"instances\")", "def instances(request): #pylint: disable=unused-argument\n response = list()\n # Retrieve filtered parameter on GET. It's used to display all instances or just user instances\n # In all cases, if user is not superuser, only user instances are displayed\n if 'filtered' in request.GET:\n filtered = ast.literal_eval(request.GET['filtered'])\n else:\n # By default, we filter based on user_id\n filtered = True\n # By default, we just list user instances, not all instances\n if not request.user.is_superuser:\n # If user is superuser and user are requesting admin view of instances\n # We ask a full list of instances\n filtered = True\n\n # We retrieve data from backend\n data_instances = BACKEND.instances(request, filtered)\n data_users = BACKEND.users()\n data_projects = BACKEND.projects()\n\n # We merge user and project information w/ instances\n for data_instance in data_instances:\n try:\n project = \"{name}\".format(\n **data_projects[data_instances[data_instance]['project_id']])\n except KeyError:\n project = data_instances[data_instance]['project_id']\n\n try:\n user = \"{name}\".format(\n **data_users[data_instances[data_instance]['user_id']])\n except KeyError:\n user = data_instances[data_instance]['user_id']\n response.append({\n 'id': data_instances[data_instance]['id'],\n 'name': data_instances[data_instance]['name'],\n 'created_at': data_instances[data_instance]['created_at'],\n 'lease_end': data_instances[data_instance]['lease_end'],\n 'project': project,\n 'user': user\n })\n return JsonResponse(response, safe=False)", "def list_test_instances():\n run('ls -1 %s' % env.site_root)", "def get_instance_templates(self):\n response = self.call_api('/global/instanceTemplates')\n return {\n template['name']: template for template in response.get('items', [])\n }", "def _get_instances(instance_tags, region):\n return ec2_conn[region].get_all_instances(filters={\"tag:Name\": instance_tags})", "def configure_cluster(ctx, zone, db_instance_name):\n ctx.run(init_pg_servers_play_run(zone, db_instance_name), pty=True, echo=True)", "def _instancelist(self):\n\n rv = []\n self.iname = {}\n for resv in self.conn.get_all_reservations():\n for inst in resv.instances:\n if inst.state != 'terminated':\n name = inst.tags.get('Name',None)\n rv.append([inst.id,inst.state])\n if name is not None:\n rv.append([name,inst.state])\n else:\n rv.append([inst.id+'-needsName',inst.state])\n self.iname[name] = inst.id\n self.iname[inst.id] = inst.id\n return rv", "def instances(self):\n if \"instances\" in self._prop_dict:\n return InstancesCollectionPage(self._prop_dict[\"instances\"])\n else:\n return None", "def test_list_ec2_instances(self):\n instances = [e for e in 
list_ec2_instances()]\n self.assertEqual([], instances)", "def _list_snapshots(self):\n return self.resource.describe_snapshots(\n Filters=[\n {\n 'Name': 'tag:CreatedBy',\n 'Values': [\n 'AutomatedBackup{}'.format(INTERVAL_TYPE.capitalize())\n ]\n }\n ]\n )", "def list_notebook_instances(NextToken=None, MaxResults=None, SortBy=None, SortOrder=None, NameContains=None, CreationTimeBefore=None, CreationTimeAfter=None, LastModifiedTimeBefore=None, LastModifiedTimeAfter=None, StatusEquals=None, NotebookInstanceLifecycleConfigNameContains=None, DefaultCodeRepositoryContains=None, AdditionalCodeRepositoryEquals=None):\n pass", "def instances(self) -> pulumi.Output[Sequence['outputs.BrokerInstance']]:\n return pulumi.get(self, \"instances\")", "async def describe_dbinstances_async(\n self,\n request: dds_20151201_models.DescribeDBInstancesRequest,\n ) -> dds_20151201_models.DescribeDBInstancesResponse:\n runtime = util_models.RuntimeOptions()\n return await self.describe_dbinstances_with_options_async(request, runtime)", "def get_datasource_instances(connection, ids=None, database_type=None, application=None,\n error_msg=None):\n application = application.id if isinstance(application, Application) else application\n application_provided = application is not None\n\n if application_provided:\n url = f\"{connection.base_url}/api/projects/{application}/datasources\"\n response = connection.session.get(url=url)\n else:\n database_type = None if database_type is None else database_type.join(\",\")\n ids = None if ids is None else ids.join(\",\")\n url = f\"{connection.base_url}/api/datasources\"\n response = connection.session.get(url=url, params={\n 'id': ids,\n 'database.type': database_type\n })\n if not response.ok:\n res = response.json()\n if application_provided and res.get(\"message\") == \"HTTP 404 Not Found\":\n # aka application based endpoint not supported\n # try without filtering\n warning_msg = (\"get_datasource_instances() warning: filtering by Application \"\n \"is not yet supported on this version of the I-Server. 
\"\n \"Returning all values.\")\n exception_handler(warning_msg, Warning, 0)\n return get_datasource_instances(connection=connection, ids=ids,\n database_type=database_type, error_msg=error_msg)\n if error_msg is None:\n if application_provided \\\n and res.get('code') == \"ERR006\" \\\n and \"not a valid value for Project ID\" in res.get('message'):\n error_msg = f\"{application} is not a valid Application class instance or ID\"\n raise ValueError(error_msg)\n error_msg = \"Error getting Datasource Instances\"\n if application_provided:\n error_msg += f\" within `{application}` Application\"\n response_handler(response, error_msg)\n response = alter_instance_list_resp(response)\n return response", "def preferred_instance_groups(self):\n if not self.unified_job_template:\n return []\n return list(self.unified_job_template.instance_groups.all())", "def instance_view(self) -> 'outputs.DedicatedHostInstanceViewResponse':\n return pulumi.get(self, \"instance_view\")", "def get_instances(ebs_support: str) -> Dict[str, str]:\n results = {}\n paginator = EC2_CLIENT.get_paginator(\"describe_instance_types\")\n resp_itr = paginator.paginate(\n Filters=[{\"Name\": \"ebs-info.ebs-optimized-support\", \"Values\": [ebs_support]}],\n )\n\n _type = \"false\" if ebs_support == \"unsupported\" else \"true\"\n for instances in resp_itr:\n for inst in instances.get(\"InstanceTypes\"):\n results[inst[\"InstanceType\"]] = _type\n return results", "def list(self, filters: dict = None, state: str = None, exclude: str = None) -> list:\n date_format = '%Y-%m-%d %H:%M:%S'\n self.instances = self.ec2.instances.all()\n\n # TOREMOVE\n def __all_instances():\n # all instances without filtering\n self.instances = [\n {\n 'InstanceId': instance.id,\n 'State': instance.state['Name'],\n 'Type': instance.instance_type,\n 'VpcId': instance.vpc_id,\n 'KeyName': instance.key_name,\n 'Tags': instance.tags,\n 'StartedAt': instance.launch_time.strftime(date_format)\n }\n for instance in self.instances\n ]\n\n if state:\n try:\n self.instances = self.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': [state]}])\n except IOError as e:\n raise EC2Error('Error listing instances by state {0} {1}'.format(state, e))\n\n if filters:\n # convert string into dict\n filters = literal_eval(filters)\n try:\n if not self.instances:\n self.instances = self.ec2.instances.all()\n\n self.instances = self.instances.filter(Filters=[{'Name': filters['Name'], 'Values': filters['Values']}])\n except IOError as e:\n raise EC2Error('Error listing instances with filters {0} {1}'.format(filters, e))\n\n if exclude:\n instances = []\n for i in self.instances:\n if i.id not in exclude:\n instances.append(i)\n return [\n {\n 'InstanceId': instance.id,\n 'State': instance.state['Name'],\n 'Type': instance.instance_type,\n 'VpcId': instance.vpc_id,\n 'KeyName': instance.key_name,\n 'Tags': instance.tags,\n 'StartedAt': instance.launch_time.strftime(date_format)\n }\n for instance in instances\n ]\n else:\n return [\n {\n 'InstanceId': instance.id,\n 'State': instance.state['Name'],\n 'Type': instance.instance_type,\n 'VpcId': instance.vpc_id,\n 'KeyName': instance.key_name,\n 'Tags': instance.tags,\n 'StartedAt': instance.launch_time.strftime(date_format)\n }\n for instance in self.instances\n ]", "def get_instances(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_instances = []\n reservations = conn.get_all_reservations()\n for reservation in reservations:\n for instance in reservation.instances:\n 
region_instances.append(instance)\n except boto.exception.EC2ResponseError:\n return []\n return region_instances", "def get_instance_list(\n client,\n prefix: str\n):\n l = set()\n page = client.list_objects_v2(\n Bucket=bucket, Prefix=prefix, MaxKeys=page_size\n )\n l |= {r['Key'] for r in page['Contents'] if r['Key'][-4:] == 'json'}\n\n while page['IsTruncated']:\n page = client.list_objects_v2(\n Bucket=bucket,\n Prefix=prefix,\n MaxKeys=page_size,\n ContinuationToken=page['NextContinuationToken']\n )\n l |= {r['Key'] for r in page['Contents'] if r['Key'][-4:] == 'json'}\n return l", "def getRunningInstances(self, instanceType='Agents', runitStatus='Run'):\n res = self.sysAdminClient.getOverallStatus()\n if not res[\"OK\"]:\n self.logError(\"Failure to get %s from system administrator client\" % instanceType, res[\"Message\"])\n return res\n\n val = res['Value'][instanceType]\n runningAgents = defaultdict(dict)\n for system, agents in val.iteritems():\n for agentName, agentInfo in agents.iteritems():\n if agentInfo['Setup'] and agentInfo['Installed']:\n if runitStatus != 'All' and agentInfo['RunitStatus'] != runitStatus:\n continue\n confPath = cfgPath('/Systems/' + system + '/' + self.setup + '/%s/' % instanceType + agentName)\n for option, default in (('PollingTime', HOUR), ('Port', None)):\n optPath = os.path.join(confPath, option)\n runningAgents[agentName][option] = gConfig.getValue(optPath, default)\n runningAgents[agentName][\"LogFileLocation\"] = \\\n os.path.join(self.diracLocation, 'runit', system, agentName, 'log', 'current')\n runningAgents[agentName][\"PID\"] = agentInfo[\"PID\"]\n runningAgents[agentName]['Module'] = agentInfo['Module']\n runningAgents[agentName]['RunitStatus'] = agentInfo['RunitStatus']\n runningAgents[agentName]['System'] = system\n\n return S_OK(runningAgents)", "async def describe_dbinstances_overview_async(\n self,\n request: dds_20151201_models.DescribeDBInstancesOverviewRequest,\n ) -> dds_20151201_models.DescribeDBInstancesOverviewResponse:\n runtime = util_models.RuntimeOptions()\n return await self.describe_dbinstances_overview_with_options_async(request, runtime)", "def instance_types(self) -> Sequence[str]:\n return pulumi.get(self, \"instance_types\")", "def running_instances(self, args):\n message = MessageClass()\n region = args[\"Region\"]\n\n # Boto3 resource creation by providing the access_id and access_secret\n ec2 = boto3.resource(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n instances = ec2.instances.filter(\n Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])\n for instance in instances:\n attachment = MessageAttachmentsClass()\n attachment.title = instance.id\n message.attach(attachment)\n\n button = MessageButtonsClass()\n button.text = \"Stop Instance\"\n button.value = \"Stop Instance\"\n button.name = \"Stop Instance\"\n button.command = {\"service_application\": self.yellowant_integration_id,\n \"function_name\": \"stop-instance\",\n \"data\": {\"Instance-ID\": instance.id, \"Region\": region}}\n attachment.attach_button(button)\n\n message.message_text = \"Instances Running are:\"\n return message.to_json()", "def getInstancesD(region):\n instances = getInstances(region)\n instancesDicts = {\"id\": i.id,\n \"KEEP-tag\": getKeepTag(i),\n \"instance_type\": i.instance_type,\n \"state\": i.state,\n \"launch_time\": 
i.launch_time,\n \"security_groups\": getGroups(i),\n \"region\": i.region.name,\n \"PROD\": isProduction(i)\n }", "def share_replicas_get_all(context, with_share_data=False,\n with_share_server=True, session=None):\n session = session or get_session()\n\n result = _share_replica_get_with_filters(\n context, with_share_server=with_share_server, session=session).all()\n\n if with_share_data:\n result = _set_instances_share_data(context, result, session)\n\n return result", "def get_instances_in_instance_group(\n self, name, zone, max_results=None, page_token=None):\n params = {}\n if max_results:\n params['maxResults'] = max_results\n if page_token:\n params['pageToken'] = page_token\n return self.call_api(\n '/zones/%s/instanceGroups/%s/listInstances' % (zone, name),\n method='POST',\n params=params,\n )", "def xtest_instance_api_negative(self):\n\n # Test creating a db instance.\n LOG.info(\"* Creating db instance\")\n body = r\"\"\"\n {\"instance\": {\n \"name\": \"dbapi_test\",\n \"flavorRef\": \"medium\",\n \"port\": \"3306\",\n \"dbtype\": {\n \"name\": \"mysql\",\n \"version\": \"5.5\"\n }\n }\n }\"\"\"\n\n req = httplib2.Http(\".cache\")\n resp, content = req.request(API_URL + \"instances\", \"POST\", body, AUTH_HEADER)\n LOG.debug(content)\n content = json.loads(content)\n LOG.debug(resp)\n LOG.debug(content)\n\n self.instance_id = content['instance']['id']\n LOG.debug(\"Instance ID: %s\" % self.instance_id)\n\n # Assert 1) that the request was accepted and 2) that the response\n # is in the expected format.\n self.assertEqual(201, resp.status)\n self.assertTrue(content.has_key('instance'))\n\n\n # Test creating an instance without a body in the request.\n LOG.info(\"* Creating an instance without a body\")\n resp, content = req.request(API_URL + \"instances\", \"POST\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n\n # Test creating an instance with a malformed body.\n LOG.info(\"* Creating an instance with a malformed body\")\n resp, content = req.request(API_URL + \"instances\", \"POST\", r\"\"\"{\"instance\": {}}\"\"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(500, resp.status)\n \n # Test listing all db instances with a body in the request.\n LOG.info(\"* Listing all db instances with a body\")\n resp, content = req.request(API_URL + \"instances\", \"GET\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n \n # Test getting a specific db instance with a body in the request.\n LOG.info(\"* Getting instance %s with a body in the request\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id, \"GET\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n\n # Test getting a non-existent db instance.\n LOG.info(\"* Getting dummy instance\")\n resp, content = req.request(API_URL + \"instances/dummy\", \"GET\", \"\", AUTH_HEADER)\n content = json.loads(content)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n\n # Test immediately resetting the password on a db instance with a body in the request.\n LOG.info(\"* Resetting password on instance %s with a body in the request\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id + \"/resetpassword\", \"POST\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n \n\n # Test resetting the password on a db 
instance for a non-existent instance\n LOG.info(\"* Resetting password on dummy instance\")\n resp, content = req.request(API_URL + \"instances/dummy/resetpassword\", \"POST\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n \n # Test restarting a db instance for a non-existent instance\n LOG.info(\"* Restarting dummy instance\")\n resp, content = req.request(API_URL + \"instances/dummy/restart\", \"POST\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n\n # Test immediately restarting a db instance with a body in the request.\n LOG.info(\"* Restarting instance %s\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id + \"/restart\", \"POST\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n\n # Test deleting an instance with a body in the request.\n LOG.info(\"* Testing delete of instance %s with a body in the request\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id, \"DELETE\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n # Test that trying to delete an already deleted instance returns\n # the proper error code.\n LOG.info(\"* Testing re-delete of instance %s\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id, \"DELETE\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)", "def get_fully_solved_instances(self, db):\n numInstances = db.session.query(db.Instance).options(joinedload_all('properties')) \\\n .filter(db.Instance.experiments.contains(self)).distinct().count()\n if numInstances == 0: return 0\n num_jobs_per_instance = db.session.query(db.ExperimentResult) \\\n .filter_by(experiment=self).count() / numInstances\n instances = []\n for i in self.instances:\n if db.session.query(db.ExperimentResult) \\\n .filter(db.ExperimentResult.resultCode.like('1%')) \\\n .filter_by(experiment=self, instance=i, status=1) \\\n .count() == num_jobs_per_instance:\n instances.append(i)\n return instances", "def do_show(self, *args):\n args = [ele for ele in args[0].split(' ')]\n if args[0] == '':\n print(\"** class name missing **\")\n return\n if args[0] not in self.list_classes:\n print(\"** class doesn't exist **\")\n return\n if len(args) != 2:\n print(\"** instance id missing **\")\n return\n\n storage.reload()\n dict_objs = storage.all()\n if dict_objs is None or dict_objs == []:\n print(\"** no instance found **\")\n return\n\n key = \"{}.{}\".format(args[0], args[1])\n if key in dict_objs.keys():\n print(dict_objs[key])\n else:\n print(\"** no instance found **\")", "def Get_Running_Instances():\n ec2 = boto3.resource('ec2') \n #call the features resource from the boto3 library\n instances = ec2.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': ['pending', 'running',]},])\n #filter the instances returned using the state name\n #you can also filter using Tags by adding the filters: \n #[{'Name': 'tag-key', 'Values': ['Role','Name',]}, {'Name': 'tag-value', 'Values': ['*test*', '*TEST*',]},]\n return [instance.id for instance in instances]\n #return a liste with the ids of the instances", "def list_images(self):\n \n logging.debug(\"list_images entered for %s\" % self.machine_name) \n snapshots = cs.list_snapshots()\n res = []\n server_id = self.cloudserver.id\n # find the one for this server\n for 
snapshot in snapshots:\n img = snapshot.metadata.get(\"instance_uuid\", None)\n # print img\n\n if img == server_id:\n print \"Server %s has snapshot %s\" % (server_id, img)\n res.append(img)\n\n return res", "def instances(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"instances\")", "def instances(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"instances\")", "def _get_running_ec2_instances(theargs):\n mapstr = ''\n if theargs.profile is not None:\n boto3.setup_default_session(profile_name=theargs.profile)\n ec2 = boto3.client('ec2', region_name='us-west-2')\n\n response = ec2.describe_regions()\n for region in response['Regions']:\n rname = region['RegionName']\n sys.stdout.write('Running ec2 query in region: ' + rname + '\\n')\n ec2 = boto3.client('ec2', region_name=rname)\n mapstr += 'Region: ' + rname + '\\n'\n respy = ec2.describe_instances()\n for reso in respy['Reservations']:\n for entry in reso['Instances']:\n namey = ''\n try:\n for keyval in entry['Tags']:\n if keyval['Key'] == 'Name':\n namey = keyval['Value']\n break\n except KeyError:\n pass\n\n mapstr += ('\\t\\t' + entry['PublicDnsName'] + '\\n' +\n '\\t\\tLaunch Date: ' + str(entry['LaunchTime']) +\n '\\n' + \n '\\t\\tId: ' + entry['InstanceId'] + '\\n' +\n '\\t\\tType: ' + entry['InstanceType'] + '\\n' +\n '\\t\\tName: ' + namey + '\\n' +\n '\\t\\tState: ' + entry['State']['Name'] + '\\n\\n')\n sys.stdout.write('\\nResults:\\n\\n')\n return mapstr", "def get_instance_classes():\n return Base_Instance.get_instance_classes()", "def instances(self, endpoint_name=None):\n if endpoint_name is None:\n _, body = self.request('/v1.1/endpoint/instances', 'GET')\n else:\n _, body = self.request('/v1.1/endpoints/%s/instances' % endpoint_name, 'GET')\n return body", "def List(self, zone):\n project = properties.VALUES.core.project.Get(required=True)\n request = self.messages.ComputeInstancesListRequest(\n zone=zone, project=project)\n instances = list_pager.YieldFromList(\n service=self.client.instances,\n request=request,\n method='List',\n field='items')\n\n result_set = []\n for instance in instances:\n if self._VMCreatedByExecGroup(instance):\n result_set.append(instance)\n\n return result_set", "def test_instance_api(self):\n\n # Test creating a db instance.\n # ----------------------------\n LOG.info(\"* Creating db instance\")\n body = r\"\"\"\n {\"instance\": {\n \"name\": \"%s\",\n \"flavorRef\": \"103\",\n \"port\": \"3306\",\n \"dbtype\": {\n \"name\": \"mysql\",\n \"version\": \"5.5\"\n }\n }\n }\"\"\" % INSTANCE_NAME\n\n client = httplib2.Http(\".cache\", timeout=TIMEOUTS['http'], disable_ssl_certificate_validation=True)\n resp, content = self._execute_request(client, \"instances\", \"POST\", body)\n\n # Assert 1) that the request was accepted and 2) that the response\n # is in the expected format.\n self.assertEqual(201, resp.status, (\"Expecting 201 as response status of create instance but received %s\" % resp.status))\n content = self._load_json(content,'Create Instance')\n self.assertTrue(content.has_key('instance'), \"Response body of create instance does not have 'instance' field\")\n\n credential = content['instance']['credential']\n\n self.instance_id = content['instance']['id']\n LOG.debug(\"Instance ID: %s\" % self.instance_id)\n\n\n # Test listing all db instances.\n # ------------------------------\n LOG.info(\"* Listing all db instances\")\n resp, content = self._execute_request(client, \"instances\", \"GET\", \"\")\n \n 
# Assert 1) that the request was accepted and 2) that the response is\n # in the expected format (e.g. a JSON object beginning with an\n # 'instances' key).\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of list instance but received %s\" % resp.status))\n content = self._load_json(content,'List all Instances')\n self.assertTrue(content.has_key('instances'), \"Response body of list instances does not contain 'instances' field.\")\n\n\n # Test getting a specific db instance.\n # ------------------------------------\n LOG.info(\"* Getting instance %s\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances/\" + self.instance_id, \"GET\", \"\")\n \n # Assert 1) that the request was accepted and 2) that the returned\n # instance is the same as the accepted instance.\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of show instance but received %s\" % resp.status))\n content = self._load_json(content,'Get Single Instance')\n self.assertEqual(self.instance_id, str(content['instance']['id']), \"Instance ID not found in Show Instance response\")\n\n\n # Check to see if the instance we previously created is \n # in the 'running' state\n # -----------------------------------------------------\n wait_so_far = 0\n status = content['instance']['status']\n pub_ip = content['instance']['hostname']\n while status != 'running' or pub_ip is None or len(pub_ip) <= 0:\n # wait a max of max_wait for instance status to show running\n time.sleep(POLL_INTERVALS['boot'])\n wait_so_far += POLL_INTERVALS['boot']\n if wait_so_far >= TIMEOUTS['boot']:\n break\n \n resp, content = self._execute_request(client, \"instances/\" + self.instance_id, \"GET\", \"\")\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of show instance but received %s\" % resp.status))\n content = self._load_json(content,'Get Single Instance')\n status = content['instance']['status']\n pub_ip = content['instance']['hostname']\n\n if status != 'running':\n\n self.fail(\"for some reason the instance did not switch to 'running' in %s\" % TIMEOUT_STR)\n else:\n # try to connect to mysql instance\n pub_ip = content['instance']['hostname']\n # user/pass = credentials\n db_user = credential['username']\n db_passwd = credential['password']\n db_name = 'mysql'\n\n LOG.info(\"* Trying to connect to mysql DB on first boot: %s, %s, %s\" %(db_user, db_passwd, pub_ip))\n conn = self.db_connect(db_user, db_passwd, pub_ip, db_name)\n if conn is None:\n self.fail(\"* maximum trials reached, db connection failed on first boot over %s: \" % pub_ip)\n conn.close()\n\n\n\n # Test resetting the password on a db instance.\n # ---------------------------------------------\n LOG.info(\"* Resetting password on instance %s\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances/\" + self.instance_id +\"/resetpassword\", \"POST\", \"\")\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of reset password but received %s\" % resp.status))\n content = self._load_json(content,'Get new password')\n\n if resp.status == 200 :\n db_new_passwd = content['password']\n LOG.info(\"* Trying to connect to mysql DB after resetting password: %s, %s, %s\" %(db_user, db_new_passwd, pub_ip))\n conn = self.db_connect(db_user, db_new_passwd, pub_ip, db_name)\n if conn is None:\n LOG.exception(\"* something is wrong with mysql connection after resetting password\")\n conn.close()\n LOG.info(\"* Maybe the old password still works ?\")\n conn_2 = 
self.db_connect(db_user, db_passwd, pub_ip, db_name)\n if conn_2 is None:\n LOG.exception(\"* no, old password does not work anymore\")\n else:\n LOG.info(\"* old password still works, new password has not kicked in\")\n conn_2.close()\n self.fail(\"* maximum trials reached, db connection failed after resetting password over %s: \" % pub_ip)\n\n\n # XXX: Suspect restarting too soon after a \"reset password\" command is putting the instance in a bad mood on restart\n time.sleep(DELAYS['between_reset_and_restart'])\n\n # Test restarting a db instance.\n # ------------------------------\n LOG.info(\"* Restarting instance %s\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances/\" + self.instance_id +\"/restart\", \"POST\", \"\")\n self.assertEqual(204, resp.status, (\"Expecting 204 as response status of restart instance but received %s\" % resp.status))\n\n # Test getting a specific db instance.\n LOG.info(\"* Getting instance %s\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances/\" + self.instance_id , \"GET\", \"\")\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of show instance but received %s\" % resp.status))\n content = self._load_json(content,'Get Single Instance after Restart')\n \n wait_so_far = 0\n status = content['instance']['status']\n while status != 'running':\n # wait a max of max_wait for instance status to show running\n time.sleep(POLL_INTERVALS['boot'])\n wait_so_far += POLL_INTERVALS['boot']\n if wait_so_far >= TIMEOUTS['boot']:\n break\n \n resp, content = self._execute_request(client, \"instances/\" + self.instance_id , \"GET\", \"\")\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of show instance but received %s\" % resp.status))\n content = self._load_json(content,'Get Single Instance')\n status = content['instance']['status']\n\n if status != 'running':\n self.fail(\"Instance %s did not go to running after a reboot and waiting %s\" % (self.instance_id, TIMEOUT_STR))\n else:\n # try to connect to mysql instance\n time.sleep(DELAYS['between_reboot_and_connect'])\n LOG.info(\"* Trying to connect to mysql DB after rebooting the instance: %s, %s, %s\" %(db_user, db_new_passwd, pub_ip))\n\n conn = self.db_connect(db_user, db_new_passwd, pub_ip, db_name)\n if conn is None:\n self.fail(\"* maximum trials reached, db connection failed after rebooting instance over %s: \" % pub_ip)\n conn.close()\n\n # Test deleting a db instance.\n # ----------------------------\n LOG.info(\"* Deleting instance %s\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances/\" + self.instance_id , \"DELETE\", \"\")\n\n # Assert 1) that the request was accepted and 2) that the instance has\n # been deleted.\n self.assertEqual(204, resp.status, \"Response status of instance delete did not return 204\")\n\n LOG.debug(\"Verifying that instance %s has been deleted\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances\", \"GET\", \"\")\n \n if not content:\n pass\n else:\n content = json.loads(content)\n for each in content['instances']:\n self.assertFalse(each['id'] == self.instance_id, (\"Instance %s did not actually get deleted\" % self.instance_id))\n\n LOG.debug(\"Sleeping...\")\n time.sleep(DELAYS['after_delete'])", "def get_solved_instances(self, db):\n instance_ids = [i[0] for i in db.session.query(db.ExperimentResult.Instances_idInstance) \\\n .filter_by(experiment=self).filter(db.ExperimentResult.resultCode.like('1%')) \\\n 
.filter_by(status=1).distinct().all()]\n return db.session.query(db.Instance).filter(db.Instance.idInstance.in_(instance_ids)).all()" ]
[ "0.6717545", "0.66253227", "0.658917", "0.6424012", "0.63812596", "0.63181734", "0.6232309", "0.6175851", "0.6156128", "0.60976166", "0.6095269", "0.6074697", "0.60370755", "0.602163", "0.58892494", "0.58741033", "0.5818116", "0.576177", "0.57498336", "0.574103", "0.5740479", "0.572916", "0.5717762", "0.5704769", "0.5702227", "0.56921226", "0.5680284", "0.56798387", "0.56752485", "0.566948", "0.5660981", "0.5660206", "0.56522834", "0.56479156", "0.5621263", "0.5614215", "0.5585233", "0.558439", "0.5576787", "0.5557872", "0.5550619", "0.55450207", "0.54911226", "0.54763275", "0.54666305", "0.5465753", "0.54265255", "0.5422462", "0.54112405", "0.5409246", "0.54063046", "0.54011923", "0.5392158", "0.53855306", "0.538201", "0.5377429", "0.5375276", "0.5357919", "0.5331543", "0.5329661", "0.5300062", "0.5289795", "0.52700204", "0.52362615", "0.523353", "0.5232485", "0.5231753", "0.52248055", "0.5213552", "0.5210025", "0.52068806", "0.51970035", "0.51954025", "0.5194237", "0.5185339", "0.5165419", "0.515683", "0.51482177", "0.5144207", "0.51379985", "0.51248676", "0.51199573", "0.5117324", "0.5114582", "0.50794905", "0.5069909", "0.50643104", "0.5058029", "0.50564754", "0.50551957", "0.50510263", "0.50486904", "0.5037641", "0.5037641", "0.50354314", "0.5034442", "0.5030489", "0.5029113", "0.501997", "0.5019308" ]
0.50197905
99
The list of replica set and standalone instances is displayed when the DBInstanceType parameter uses the default value replicate. To query the list of sharded cluster instances, you must set the DBInstanceType parameter to sharding.
def describe_dbinstances(
    self,
    request: dds_20151201_models.DescribeDBInstancesRequest,
) -> dds_20151201_models.DescribeDBInstancesResponse:
    runtime = util_models.RuntimeOptions()
    return self.describe_dbinstances_with_options(request, runtime)
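A usage note for the row above, not part of the dataset itself: the sketch below assumes a constructed `Client` from the `alibabacloud_dds20151201` SDK (the `client` variable and the region ID are placeholder assumptions) and illustrates how setting `dbinstance_type` — the `DBInstanceType` parameter — to `sharding` switches the call from the default replica set / standalone listing to sharded cluster instances.

# Hedged usage sketch; `client` and the region ID are assumptions, not values from the dataset.
from alibabacloud_dds20151201 import models as dds_20151201_models


def list_sharded_cluster_instances(client):
    # DBInstanceType defaults to 'replicate' (replica set and standalone instances);
    # setting it to 'sharding' asks DescribeDBInstances for sharded cluster instances instead.
    request = dds_20151201_models.DescribeDBInstancesRequest(
        region_id='cn-hangzhou',  # assumed example region
        dbinstance_type='sharding',
    )
    response = client.describe_dbinstances(request)
    # Recent generated SDKs wrap the payload in `.body`; adjust if your version differs.
    return response.body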
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_instances(cls, args, config):\n instance_list = config.get_all_instances()\n if len(instance_list) > 0:\n table_data = []\n for i in instance_list:\n provider_obj = config.get_object_by_id(i.provider_id, 'Provider')\n if provider_obj is None:\n continue\n provider_name = provider_obj.name\n #print \"provider_obj.type\",provider_obj.type\n if i.worker_group_id is not None:\n name = config.get_object_by_id(i.worker_id, 'WorkerGroup').name\n itype = 'worker'\n else:\n name = config.get_object_by_id(i.controller_id, 'Controller').name\n itype = 'controller'\n table_data.append([i.id, provider_name, i.provider_instance_identifier, itype, name])\n table_print(['ID', 'provider', 'instance id', 'type', 'name'], table_data)\n else:\n print \"No instance found\"", "def getinstancelist():\n dbcursor_dict.execute(dbq.get_all_instance_list, )\n db_instance_list = dbcursor_dict.fetchall()\n return db_instance_list", "def list_(args):\n\n # Get Config.py\n cloud = get_current_cloud(args.cloud)\n\n instances = cloud.list_instances()\n print_table(print_instance_summary, headers, instances,\n use_color=args.color)\n return instances", "def list_instances(self):\n # list instances\n self._list_instances()", "def list_instances(self):\n print '# AWS EC2 instances'\n self.compute.list_instances()", "def list_instances():\n if request.method == \"GET\":\n return render_template(\"instances.html\")", "def show_instances():\n return get_instances()", "def list_instances(self):\n LOG.debug(\"list_instances\")\n\n instance_ids = []\n bmms = db.bmm_get_all(None)\n for bmm in bmms:\n if not bmm[\"instance_id\"]:\n continue\n instance_ids.append(self._instance_id_to_name(bmm[\"instance_id\"]))\n\n return instance_ids", "def list_instances(self):\n\n response = self.client.service.instances().aggregatedList(\n project=self.client.project_id).execute()\n\n zones = response.get('items', {})\n instances = []\n for zone in zones.values():\n for instance in zone.get('instances', []):\n instances.append(instance)\n\n return instances", "def list_instances():\n print(str_sep)\n print(\"Listing EC2 instances from [{}] region.\".format(SESSION.region_name))\n print(\"{:20s}{:15s}{:10s}{}\".format(\"ID\", \"TYPE\", \"STATE\", \"NAME\"))\n print(str_sep)\n\n try:\n for instance in EC2_MANAGER.list_instances():\n # get the instance name in the tags list\n name = next((item for item in instance.tags if item[\"Key\"] == \"Name\"),\n {'Key': 'Name', 'Value': 'None'})\n\n print(\"{:20s}{:15s}{:10s}{}\".format(instance.id,\n instance.instance_type,\n instance.state['Name'],\n name['Value']))\n except ClientError as e:\n ErrManager.err_manager(e)\n\n print(str_sep)", "def get_dbservers(self):\n ret = []\n for i in self.all_instances:\n if i.is_dbserver():\n ret.append(i)\n return ret", "def list(self, instance, limit=None, marker=None):\n return self._list(\"/instances/%s/databases\" % base.getid(instance),\n \"databases\", limit, marker)", "def get_instance_list():\n return parse_list_output(Popen('nova list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])", "def get_instances() -> dict:\n url = f\"{app.config['COMPUTE_SERVERS_REF']}/detail\"\n instances_rq = request(\n method=\"GET\", url=url, headers=build_header(), params={\"vm_state\": \"active\"},\n )\n\n if not instances_rq.ok:\n HTTPError(instances_rq.status_code)\n\n answer = {\"servers\": list()}\n for instance in instances_rq.json()[\"servers\"]:\n instance_info = dict(name=instance[\"name\"])\n instance_info[\"ip_addresses\"] = list()\n 
for network, info in instance[\"addresses\"].items():\n instance_info[\"ip_addresses\"].extend(entry[\"addr\"] for entry in info)\n answer[\"servers\"].append(instance_info)\n\n return answer", "def list_aws_instances(verbose=False, state='all'):\n conn = get_ec2_connection()\n\n reservations = conn.get_all_reservations()\n instances = []\n for res in reservations:\n for instance in res.instances:\n if state == 'all' or instance.state == state:\n instance = {\n 'id': instance.id,\n 'type': instance.instance_type,\n 'image': instance.image_id,\n 'state': instance.state,\n 'instance': instance,\n }\n instances.append(instance)\n env.instances = instances\n if verbose:\n import pprint\n pprint.pprint(env.instances)", "def list_instances():\n js = _get_jetstream_conn()\n il = js.compute.instances.list()\n if not il:\n msg = \"You don't have any instances available.\"\n else:\n msg = (\"You have {0} instances available. Here are up to 3 most \"\n \"recent: \".format(len(il)))\n msg_ex = \"\"\n content = \"\"\n for i in il[:3]:\n msg_ex += \"{0},\".format(i.name)\n content += \"{0} ({1})\\n\".format(\n i.name, i.public_ips[0] if i.public_ips else i.private_ips[0])\n return statement(msg + msg_ex).simple_card(title=msg, content=content)", "def describe_dbinstances_overview(\n self,\n request: dds_20151201_models.DescribeDBInstancesOverviewRequest,\n ) -> dds_20151201_models.DescribeDBInstancesOverviewResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_dbinstances_overview_with_options(request, runtime)", "def list_running_instances(self):\n print '# Running AWS EC2 instances'\n self.compute.list_running_instances()", "def instances(self, **query):\n return self._list(_instance.Instance, **query)", "def list_databases(self, instance, limit=None, marker=None):\n return instance.list_databases(limit=limit, marker=marker)", "def describe_rds_instances(rds, account, region, output_bucket):\n rds_list = rds.describe_db_instances().get('DBInstances')\n\n for rds_obj in rds_list:\n #print rds_obj\n output_bucket.append(misc.format_line((\n misc.check_if(account.get('name')),\n misc.check_if(region.get('RegionName')),\n misc.check_if(rds_obj.get('DBSubnetGroup').get('VpcId')),\n misc.check_if(rds_obj.get('DBInstanceIdentifier')),\n misc.check_if(rds_obj.get('DBInstanceClass')),\n misc.check_if(str(rds_obj.get('PubliclyAccessible'))),\n misc.check_if(rds_obj.get('Endpoint').get('Address')),\n misc.lookup(rds_obj.get('Endpoint').get('Address')),\n misc.check_if(str(rds_obj.get('Endpoint').get('Port')))\n )))", "def show_all_instances(self):\n if not self.all_instances:\n logging.error(\"%s: no instances detected\", self.name)\n return\n instances = \"\"\n for instance in self.all_instances:\n instances += \" - {0.name} (pid: {0.pid})\".format(instance)\n logging.info(\"arangod instances for starter: %s - %s\", self.name, instances)", "def instance_list(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"instance_list\")", "def list_instances(self):\n instances = []\n try:\n pages = self.compute.virtual_machines.list(\n CONF.azure.resource_group)\n except Exception as e:\n msg = six.text_type(e)\n LOG.exception(msg)\n ex = exception.InstanceListFailure(reason=six.text_type(e))\n raise ex\n else:\n if pages:\n for i in pages:\n instances.append(i.name)\n return instances", "def list_ebss_by_instance():\n\n ec2 = u.create_ec2_resource()\n instances = [(u.seconds_from_datetime(i.launch_time), i) for i in ec2.instances.all()]\n sorted_instances = sorted(instances, 
key=itemgetter(0))\n\n for (seconds, instance) in sorted_instances:\n\n volumes = instance.volumes.all()\n volume_strs = []\n for v in volumes:\n volume_strs.append(\"%s (%s)\"%(v.id, v.size))\n print(\"%s: %s\" % (u.get_name(instance.tags), ','.join(volume_strs)))", "def index(self, req, instance_id):\n LOG.info(\"Call to Databases index - %s\", instance_id)\n LOG.debug(\"%s - %s\", req.environ, req.body)\n local_id = dbapi.localid_from_uuid(instance_id)\n ctxt = req.environ['nova.context']\n common.instance_available(ctxt, instance_id, local_id, self.compute_api)\n try:\n result = self.guest_api.list_databases(ctxt, local_id)\n except Exception as err:\n LOG.error(err)\n raise exception.InstanceFault(\"Unable to get the list of databases\")\n LOG.debug(\"LIST DATABASES RESULT - %s\", str(result))\n databases = {'databases':[]}\n for database in result:\n mysql_database = models.MySQLDatabase()\n mysql_database.deserialize(database)\n databases['databases'].append({'name': mysql_database.name})\n LOG.debug(\"LIST DATABASES RETURN - %s\", databases)\n return databases", "def list_instance_uuids(self):\n return self.list_instances()", "def _getAllRunningInstances(self):\n return self._ec2.get_only_instances(filters={\n 'tag:leader_instance_id': self._instanceId,\n 'instance-state-name': 'running'})", "def list_as_instances(access_key, secret_key, region, autoscaling_group):\n\n aws_as = init_aws_as_conn(access_key, secret_key, region)\n aws_ec2 = init_aws_ec2_conn(access_key, secret_key, region)\n autoscaling_instances = []\n\n vm = aws_as.get_all_groups([autoscaling_group])\n autoscaling_instances_id = [j.instance_id for i in vm for j in i.instances]\n\n for instance_id in autoscaling_instances_id:\n vm = boto.ec2.instance.Instance(aws_ec2)\n vm.id = instance_id\n vm.update()\n autoscaling_instances.append(vm)\n\n return autoscaling_instances", "def query_db_cluster(instance_id):\n try:\n response = RDS.describe_db_instances(\n DBInstanceIdentifier=instance_id\n )\n return response['DBInstances'][0]['DBClusterIdentifier']\n except KeyError:\n db_subnet = response['DBInstances'][0]['DBSubnetGroup']['DBSubnetGroupName']\n return [False, db_subnet]", "def databases(request): # pylint: disable=unused-argument\n response = InstancesAccess.get_all()\n # By default, JsonResponse refuse to serialize a list to a Json list. safe=False allow it.\n return JsonResponse(response, safe=False)", "def databases(self, instance, **query):\n instance = self._get_resource(_instance.Instance, instance)\n return self._list(_database.Database, instance_id=instance.id, **query)", "def list_instances_detail(self):\n\n # TODO(imsplitbit): need to ask around if this is the best way to do\n # this. This causes some redundant vzlist commands as get_info is run\n # on every item returned from this command but it didn't make sense\n # to re-implement get_info as get_info_all.\n infos = []\n try:\n # get a list of CT names which will be nova friendly.\n # NOTE: This can be an issue if nova decides to change\n # the format of names. 
We would need to have a migration process\n # to change the names in the name field of the CTs.\n out, err = utils.execute('sudo', 'vzlist', '--all', '-o',\n 'name', '-H')\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Problem listing Vzs')\n\n for name in out.splitlines():\n name = name.split()[0]\n status = self.get_info(name)\n infos.append(driver.InstanceInfo(name, status['state']))\n\n return infos", "def instances(self):\n return self.get('instances')", "def list_instances(self):\n nodes = self._driver.list_nodes()\n return [[n.name, n.state, n.public_ips] for n in nodes]", "def do_instance_show(cs, args):\n try:\n instance = cs.instances.detail(args.instance)\n except exceptions.NotFound as e:\n msg = \"No server with an id of '%s' exists\" % args.instance\n e.message = msg\n raise\n\n _print_server_details(instance)", "def instances(self):\r\n # It would be more efficient to do this with filters now\r\n # but not all services that implement EC2 API support filters.\r\n instances = []\r\n rs = self.connection.get_all_instances()\r\n for reservation in rs:\r\n uses_group = [g.name for g in reservation.groups if g.name == self.name]\r\n if uses_group:\r\n instances.extend(reservation.instances)\r\n return instances", "def GetInstances(self, bulk=False, reason=None):\n query = []\n _AppendIf(query, bulk, (\"bulk\", 1))\n _AppendReason(query, reason)\n\n instances = self._SendRequest(HTTP_GET,\n \"/%s/instances\" % GANETI_RAPI_VERSION,\n query, None)\n if bulk:\n return instances\n else:\n return [i[\"id\"] for i in instances]", "def describe_rds_db_instances(StackId=None, RdsDbInstanceArns=None):\n pass", "def listInstancesRegionZone(region,zone):\n\tprint \"-\"*80\n\tprint \"# Region :\",region,\" Zone\", zone\t\n\tprint \"-\"*80\n\tinstances = getInstancesRegionZone(region,zone)\n\tif instances:\n\t\tfor instance in instances:\n\t\t\tprint \"[\",instance.ami_launch_index,\"]\",instance.ip_address,\" (\",instance.private_ip_address,\") \",instance.instance_type,\" key=\",instance.key_name", "def describe_instances():\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Describe instances\n instances = ec2_resource.instances.all()\n for instance in instances:\n print('State of the instance \"' + instance.id + '\" is: \"' + instance.state['Name'] + '\"')\n return", "def list_instances_detail(self, context):\n LOG.debug(\"list_instances_detail\")\n\n info_list = []\n bmms = db.bmm_get_all_by_instance_id_not_null(context)\n for bmm in bmms:\n instance = db.instance_get(context, bmm[\"instance_id\"])\n status = PowerManager(bmm[\"ipmi_ip\"]).status()\n if status == \"off\":\n inst_power_state = power_state.SHUTOFF\n\n if instance[\"vm_state\"] == vm_states.ACTIVE:\n db.instance_update(context, instance[\"id\"], {\"vm_state\": vm_states.STOPPED})\n else:\n inst_power_state = power_state.RUNNING\n\n if instance[\"vm_state\"] == vm_states.STOPPED:\n db.instance_update(context, instance[\"id\"], {\"vm_state\": vm_states.ACTIVE})\n\n info_list.append(driver.InstanceInfo(self._instance_id_to_name(bmm[\"instance_id\"]), \n inst_power_state))\n\n return info_list", "def instances(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstanceStatusArgs']]]]:\n return pulumi.get(self, \"instances\")", "def do_instance_list(cs, args):\n instances = cs.instances.list()\n\n fields = [\"OCCI ID\"]\n if args.detailed:\n fields.extend([\"Name\", \"State\", \"Network\"])\n occi_attrs = 
(\"occi.compute.hostname\",\n \"occi.compute.state\")\n\n pt = prettytable.PrettyTable([f for f in fields], caching=False)\n pt.align = 'l'\n\n for instance in instances:\n row = []\n attrs = instance.get('attributes', {})\n instance_id = attrs.get('occi.core.id', None)\n row.append(instance_id)\n\n if args.detailed and instance_id:\n if not all([i in attrs for i in occi_attrs]):\n instance = cs.instances.detail(instance_id)\n attrs = instance.get('attributes', {})\n\n name = attrs.get(\"occi.core.title\", None)\n if name is None:\n name = attrs.get(\"occi.compute.hostname\", None)\n row.append(name)\n row.append(attrs.get(\"occi.compute.state\", None))\n\n links = instance.get(\"links\", [])\n network = []\n for link in links:\n if occi.CATEGORIES[\"network\"] in link[\"kind\"][\"related\"]:\n # get IPv4\n ip = link[\"attributes\"].get(\n \"occi.networkinterface.address\",\n None\n )\n if not ip:\n ip = link[\"attributes\"].get(\n \"occi.networkinterface.ip6\",\n None\n )\n network.append(ip)\n row.append(network)\n\n pt.add_row(row)\n\n print(pt.get_string())", "def get_instances(self):\n connection = self.connection\n\n instances = []\n\n connection.row_factory = sqlite3.Row\n cur = connection.cursor()\n cur.execute(\"SELECT * FROM INSTANCES\")\n rows = cur.fetchall()\n columns = [str(i[0]).lower() for i in cur.description]\n for row in rows:\n object = dict(zip(columns, row))\n instances.append(object)\n\n instancesNoneDict = {}\n\n for instance in instances:\n if instance['harvesterid'] not in instancesNoneDict:\n instancesNoneDict[instance['harvesterid']] = {}\n if instance['harvesterhost'] not in instancesNoneDict[instance['harvesterid']]:\n instancesNoneDict[instance['harvesterid']][instance['harvesterhost']] = {\n 'availability': instance['availability'], 'errorsdesc': instance['errorsdesc'],\n 'contacts': instance['contacts'].split(','),\n 'active': instance['active'], 'notificated': instance['notificated']}\n elif instance['harvesterid'] in instancesNoneDict:\n if instance['harvesterhost'] not in instancesNoneDict[instance['harvesterid']]:\n instancesNoneDict[instance['harvesterid']][instance['harvesterhost']] = {\n 'availability': instance['availability'], 'errorsdesc': instance['errorsdesc'],\n 'contacts': instance['contacts'].split(','),\n 'active': instance['active'], 'notificated': instance['notificated']}\n if 'none' in instancesNoneDict[instance['harvesterid']]:\n del instancesNoneDict[instance['harvesterid']]['none']\n return instancesNoneDict", "def list_instances(self):\n instances = utils.list_instances(self.compute_client,\n drv_conf.resource_group)\n\n self._uuid_to_omni_instance.clear()\n instance_names = []\n for instance in instances:\n openstack_id = None\n if instance.tags and 'openstack_id' in instance.tags:\n openstack_id = instance.tags['openstack_id']\n if openstack_id is None:\n openstack_id = self._get_uuid_from_omni_id(instance.name)\n self._uuid_to_omni_instance[openstack_id] = instance\n instance_names.append(instance.name)\n return instance_names", "def list_instance_name():\n\n if request.method == \"GET\":\n with lock:\n names = list(instances.keys())\n return jsonify(names)\n return Response(status=200)", "def describe_dbinstances_with_options(\n self,\n request: dds_20151201_models.DescribeDBInstancesRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstancesResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.charge_type):\n query['ChargeType'] = request.charge_type\n 
if not UtilClient.is_unset(request.connection_domain):\n query['ConnectionDomain'] = request.connection_domain\n if not UtilClient.is_unset(request.dbinstance_class):\n query['DBInstanceClass'] = request.dbinstance_class\n if not UtilClient.is_unset(request.dbinstance_description):\n query['DBInstanceDescription'] = request.dbinstance_description\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.dbinstance_status):\n query['DBInstanceStatus'] = request.dbinstance_status\n if not UtilClient.is_unset(request.dbinstance_type):\n query['DBInstanceType'] = request.dbinstance_type\n if not UtilClient.is_unset(request.dbnode_type):\n query['DBNodeType'] = request.dbnode_type\n if not UtilClient.is_unset(request.engine):\n query['Engine'] = request.engine\n if not UtilClient.is_unset(request.engine_version):\n query['EngineVersion'] = request.engine_version\n if not UtilClient.is_unset(request.expire_time):\n query['ExpireTime'] = request.expire_time\n if not UtilClient.is_unset(request.expired):\n query['Expired'] = request.expired\n if not UtilClient.is_unset(request.network_type):\n query['NetworkType'] = request.network_type\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.page_number):\n query['PageNumber'] = request.page_number\n if not UtilClient.is_unset(request.page_size):\n query['PageSize'] = request.page_size\n if not UtilClient.is_unset(request.region_id):\n query['RegionId'] = request.region_id\n if not UtilClient.is_unset(request.replication_factor):\n query['ReplicationFactor'] = request.replication_factor\n if not UtilClient.is_unset(request.resource_group_id):\n query['ResourceGroupId'] = request.resource_group_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.tag):\n query['Tag'] = request.tag\n if not UtilClient.is_unset(request.v_switch_id):\n query['VSwitchId'] = request.v_switch_id\n if not UtilClient.is_unset(request.vpc_id):\n query['VpcId'] = request.vpc_id\n if not UtilClient.is_unset(request.zone_id):\n query['ZoneId'] = request.zone_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstances',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstancesResponse(),\n self.call_api(params, req, runtime)\n )", "def list(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n print(vmrun.listSnapshots())", "def _list_backups_for_instance(self, instance, marker=0, limit=20):\n uri = \"/%s/%s/backups?limit=%d&marker=%d\" % (self.uri_base,\n utils.get_id(instance),\n int(limit),\n int(marker))\n resp, resp_body = self.api.method_get(uri)\n mgr = self.api._backup_manager\n return [CloudDatabaseBackup(mgr, backup)\n for 
backup in resp_body.get(\"backups\")]", "def get_instances(cls):\n raise NotImplementedError", "def instances(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"instances\")", "def describe_dbinstances_overview_with_options(\n self,\n request: dds_20151201_models.DescribeDBInstancesOverviewRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstancesOverviewResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.charge_type):\n query['ChargeType'] = request.charge_type\n if not UtilClient.is_unset(request.engine_version):\n query['EngineVersion'] = request.engine_version\n if not UtilClient.is_unset(request.instance_class):\n query['InstanceClass'] = request.instance_class\n if not UtilClient.is_unset(request.instance_ids):\n query['InstanceIds'] = request.instance_ids\n if not UtilClient.is_unset(request.instance_status):\n query['InstanceStatus'] = request.instance_status\n if not UtilClient.is_unset(request.instance_type):\n query['InstanceType'] = request.instance_type\n if not UtilClient.is_unset(request.network_type):\n query['NetworkType'] = request.network_type\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.region_id):\n query['RegionId'] = request.region_id\n if not UtilClient.is_unset(request.resource_group_id):\n query['ResourceGroupId'] = request.resource_group_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.v_switch_id):\n query['VSwitchId'] = request.v_switch_id\n if not UtilClient.is_unset(request.vpc_id):\n query['VpcId'] = request.vpc_id\n if not UtilClient.is_unset(request.zone_id):\n query['ZoneId'] = request.zone_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstancesOverview',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstancesOverviewResponse(),\n self.call_api(params, req, runtime)\n )", "def list_rds(region, filter_by_kwargs):\n conn = boto.rds.connect_to_region(region)\n instances = conn.get_all_dbinstances()\n return lookup(instances, filter_by=filter_by_kwargs)", "def get_instances(self):\n for server in self.cs.servers.list():\n match = self.cluster_re.match(server.name)\n if match:\n for ip in server.networks['public']:\n if ip.count('.'):\n v4ip = ip\n yield (match.group('role'), v4ip)", "def get_instance_classes():\n return Base_Instance.instance_classes", "def InstancesMultiAlloc(self, instances, reason=None, **kwargs):\n query = []\n body = {\n \"instances\": instances,\n }\n self._UpdateWithKwargs(body, **kwargs)\n\n _AppendDryRunIf(query, kwargs.get(\"dry_run\"))\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_POST,\n \"/%s/instances-multi-alloc\" % GANETI_RAPI_VERSION,\n query, body)", "def list(self, instance=None, limit=20, marker=0):\n if instance is None:\n return 
super(CloudDatabaseBackupManager, self).list()\n return self.api._manager._list_backups_for_instance(instance, limit=limit,\n marker=marker)", "def get_all_instances(self, instance_ids=None, filters=None):\r\n params = {}\r\n if instance_ids:\r\n self.build_list_params(params, instance_ids, 'InstanceId')\r\n if filters:\r\n if 'group-id' in filters:\r\n warnings.warn(\"The group-id filter now requires a security \"\r\n \"group identifier (sg-*) instead of a group \"\r\n \"name. To filter by group name use the \"\r\n \"'group-name' filter instead.\", UserWarning)\r\n self.build_filter_params(params, filters)\r\n return self.get_list('DescribeInstances', params,\r\n [('item', Reservation)], verb='POST')", "def instances(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BrokerInstanceArgs']]]]:\n return pulumi.get(self, \"instances\")", "def instances(request): #pylint: disable=unused-argument\n response = list()\n # Retrieve filtered parameter on GET. It's used to display all instances or just user instances\n # In all cases, if user is not superuser, only user instances are displayed\n if 'filtered' in request.GET:\n filtered = ast.literal_eval(request.GET['filtered'])\n else:\n # By default, we filter based on user_id\n filtered = True\n # By default, we just list user instances, not all instances\n if not request.user.is_superuser:\n # If user is superuser and user are requesting admin view of instances\n # We ask a full list of instances\n filtered = True\n\n # We retrieve data from backend\n data_instances = BACKEND.instances(request, filtered)\n data_users = BACKEND.users()\n data_projects = BACKEND.projects()\n\n # We merge user and project information w/ instances\n for data_instance in data_instances:\n try:\n project = \"{name}\".format(\n **data_projects[data_instances[data_instance]['project_id']])\n except KeyError:\n project = data_instances[data_instance]['project_id']\n\n try:\n user = \"{name}\".format(\n **data_users[data_instances[data_instance]['user_id']])\n except KeyError:\n user = data_instances[data_instance]['user_id']\n response.append({\n 'id': data_instances[data_instance]['id'],\n 'name': data_instances[data_instance]['name'],\n 'created_at': data_instances[data_instance]['created_at'],\n 'lease_end': data_instances[data_instance]['lease_end'],\n 'project': project,\n 'user': user\n })\n return JsonResponse(response, safe=False)", "def list_test_instances():\n run('ls -1 %s' % env.site_root)", "def get_instance_templates(self):\n response = self.call_api('/global/instanceTemplates')\n return {\n template['name']: template for template in response.get('items', [])\n }", "def configure_cluster(ctx, zone, db_instance_name):\n ctx.run(init_pg_servers_play_run(zone, db_instance_name), pty=True, echo=True)", "def _get_instances(instance_tags, region):\n return ec2_conn[region].get_all_instances(filters={\"tag:Name\": instance_tags})", "def _instancelist(self):\n\n rv = []\n self.iname = {}\n for resv in self.conn.get_all_reservations():\n for inst in resv.instances:\n if inst.state != 'terminated':\n name = inst.tags.get('Name',None)\n rv.append([inst.id,inst.state])\n if name is not None:\n rv.append([name,inst.state])\n else:\n rv.append([inst.id+'-needsName',inst.state])\n self.iname[name] = inst.id\n self.iname[inst.id] = inst.id\n return rv", "def instances(self):\n if \"instances\" in self._prop_dict:\n return InstancesCollectionPage(self._prop_dict[\"instances\"])\n else:\n return None", "def test_list_ec2_instances(self):\n instances = [e for e in 
list_ec2_instances()]\n self.assertEqual([], instances)", "def _list_snapshots(self):\n return self.resource.describe_snapshots(\n Filters=[\n {\n 'Name': 'tag:CreatedBy',\n 'Values': [\n 'AutomatedBackup{}'.format(INTERVAL_TYPE.capitalize())\n ]\n }\n ]\n )", "def list_notebook_instances(NextToken=None, MaxResults=None, SortBy=None, SortOrder=None, NameContains=None, CreationTimeBefore=None, CreationTimeAfter=None, LastModifiedTimeBefore=None, LastModifiedTimeAfter=None, StatusEquals=None, NotebookInstanceLifecycleConfigNameContains=None, DefaultCodeRepositoryContains=None, AdditionalCodeRepositoryEquals=None):\n pass", "def instances(self) -> pulumi.Output[Sequence['outputs.BrokerInstance']]:\n return pulumi.get(self, \"instances\")", "async def describe_dbinstances_async(\n self,\n request: dds_20151201_models.DescribeDBInstancesRequest,\n ) -> dds_20151201_models.DescribeDBInstancesResponse:\n runtime = util_models.RuntimeOptions()\n return await self.describe_dbinstances_with_options_async(request, runtime)", "def get_datasource_instances(connection, ids=None, database_type=None, application=None,\n error_msg=None):\n application = application.id if isinstance(application, Application) else application\n application_provided = application is not None\n\n if application_provided:\n url = f\"{connection.base_url}/api/projects/{application}/datasources\"\n response = connection.session.get(url=url)\n else:\n database_type = None if database_type is None else database_type.join(\",\")\n ids = None if ids is None else ids.join(\",\")\n url = f\"{connection.base_url}/api/datasources\"\n response = connection.session.get(url=url, params={\n 'id': ids,\n 'database.type': database_type\n })\n if not response.ok:\n res = response.json()\n if application_provided and res.get(\"message\") == \"HTTP 404 Not Found\":\n # aka application based endpoint not supported\n # try without filtering\n warning_msg = (\"get_datasource_instances() warning: filtering by Application \"\n \"is not yet supported on this version of the I-Server. 
\"\n \"Returning all values.\")\n exception_handler(warning_msg, Warning, 0)\n return get_datasource_instances(connection=connection, ids=ids,\n database_type=database_type, error_msg=error_msg)\n if error_msg is None:\n if application_provided \\\n and res.get('code') == \"ERR006\" \\\n and \"not a valid value for Project ID\" in res.get('message'):\n error_msg = f\"{application} is not a valid Application class instance or ID\"\n raise ValueError(error_msg)\n error_msg = \"Error getting Datasource Instances\"\n if application_provided:\n error_msg += f\" within `{application}` Application\"\n response_handler(response, error_msg)\n response = alter_instance_list_resp(response)\n return response", "def preferred_instance_groups(self):\n if not self.unified_job_template:\n return []\n return list(self.unified_job_template.instance_groups.all())", "def instance_view(self) -> 'outputs.DedicatedHostInstanceViewResponse':\n return pulumi.get(self, \"instance_view\")", "def get_instances(ebs_support: str) -> Dict[str, str]:\n results = {}\n paginator = EC2_CLIENT.get_paginator(\"describe_instance_types\")\n resp_itr = paginator.paginate(\n Filters=[{\"Name\": \"ebs-info.ebs-optimized-support\", \"Values\": [ebs_support]}],\n )\n\n _type = \"false\" if ebs_support == \"unsupported\" else \"true\"\n for instances in resp_itr:\n for inst in instances.get(\"InstanceTypes\"):\n results[inst[\"InstanceType\"]] = _type\n return results", "def list(self, filters: dict = None, state: str = None, exclude: str = None) -> list:\n date_format = '%Y-%m-%d %H:%M:%S'\n self.instances = self.ec2.instances.all()\n\n # TOREMOVE\n def __all_instances():\n # all instances without filtering\n self.instances = [\n {\n 'InstanceId': instance.id,\n 'State': instance.state['Name'],\n 'Type': instance.instance_type,\n 'VpcId': instance.vpc_id,\n 'KeyName': instance.key_name,\n 'Tags': instance.tags,\n 'StartedAt': instance.launch_time.strftime(date_format)\n }\n for instance in self.instances\n ]\n\n if state:\n try:\n self.instances = self.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': [state]}])\n except IOError as e:\n raise EC2Error('Error listing instances by state {0} {1}'.format(state, e))\n\n if filters:\n # convert string into dict\n filters = literal_eval(filters)\n try:\n if not self.instances:\n self.instances = self.ec2.instances.all()\n\n self.instances = self.instances.filter(Filters=[{'Name': filters['Name'], 'Values': filters['Values']}])\n except IOError as e:\n raise EC2Error('Error listing instances with filters {0} {1}'.format(filters, e))\n\n if exclude:\n instances = []\n for i in self.instances:\n if i.id not in exclude:\n instances.append(i)\n return [\n {\n 'InstanceId': instance.id,\n 'State': instance.state['Name'],\n 'Type': instance.instance_type,\n 'VpcId': instance.vpc_id,\n 'KeyName': instance.key_name,\n 'Tags': instance.tags,\n 'StartedAt': instance.launch_time.strftime(date_format)\n }\n for instance in instances\n ]\n else:\n return [\n {\n 'InstanceId': instance.id,\n 'State': instance.state['Name'],\n 'Type': instance.instance_type,\n 'VpcId': instance.vpc_id,\n 'KeyName': instance.key_name,\n 'Tags': instance.tags,\n 'StartedAt': instance.launch_time.strftime(date_format)\n }\n for instance in self.instances\n ]", "def get_instances(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_instances = []\n reservations = conn.get_all_reservations()\n for reservation in reservations:\n for instance in reservation.instances:\n 
region_instances.append(instance)\n except boto.exception.EC2ResponseError:\n return []\n return region_instances", "def get_instance_list(\n client,\n prefix: str\n):\n l = set()\n page = client.list_objects_v2(\n Bucket=bucket, Prefix=prefix, MaxKeys=page_size\n )\n l |= {r['Key'] for r in page['Contents'] if r['Key'][-4:] == 'json'}\n\n while page['IsTruncated']:\n page = client.list_objects_v2(\n Bucket=bucket,\n Prefix=prefix,\n MaxKeys=page_size,\n ContinuationToken=page['NextContinuationToken']\n )\n l |= {r['Key'] for r in page['Contents'] if r['Key'][-4:] == 'json'}\n return l", "def getRunningInstances(self, instanceType='Agents', runitStatus='Run'):\n res = self.sysAdminClient.getOverallStatus()\n if not res[\"OK\"]:\n self.logError(\"Failure to get %s from system administrator client\" % instanceType, res[\"Message\"])\n return res\n\n val = res['Value'][instanceType]\n runningAgents = defaultdict(dict)\n for system, agents in val.iteritems():\n for agentName, agentInfo in agents.iteritems():\n if agentInfo['Setup'] and agentInfo['Installed']:\n if runitStatus != 'All' and agentInfo['RunitStatus'] != runitStatus:\n continue\n confPath = cfgPath('/Systems/' + system + '/' + self.setup + '/%s/' % instanceType + agentName)\n for option, default in (('PollingTime', HOUR), ('Port', None)):\n optPath = os.path.join(confPath, option)\n runningAgents[agentName][option] = gConfig.getValue(optPath, default)\n runningAgents[agentName][\"LogFileLocation\"] = \\\n os.path.join(self.diracLocation, 'runit', system, agentName, 'log', 'current')\n runningAgents[agentName][\"PID\"] = agentInfo[\"PID\"]\n runningAgents[agentName]['Module'] = agentInfo['Module']\n runningAgents[agentName]['RunitStatus'] = agentInfo['RunitStatus']\n runningAgents[agentName]['System'] = system\n\n return S_OK(runningAgents)", "async def describe_dbinstances_overview_async(\n self,\n request: dds_20151201_models.DescribeDBInstancesOverviewRequest,\n ) -> dds_20151201_models.DescribeDBInstancesOverviewResponse:\n runtime = util_models.RuntimeOptions()\n return await self.describe_dbinstances_overview_with_options_async(request, runtime)", "def instance_types(self) -> Sequence[str]:\n return pulumi.get(self, \"instance_types\")", "def running_instances(self, args):\n message = MessageClass()\n region = args[\"Region\"]\n\n # Boto3 resource creation by providing the access_id and access_secret\n ec2 = boto3.resource(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n instances = ec2.instances.filter(\n Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])\n for instance in instances:\n attachment = MessageAttachmentsClass()\n attachment.title = instance.id\n message.attach(attachment)\n\n button = MessageButtonsClass()\n button.text = \"Stop Instance\"\n button.value = \"Stop Instance\"\n button.name = \"Stop Instance\"\n button.command = {\"service_application\": self.yellowant_integration_id,\n \"function_name\": \"stop-instance\",\n \"data\": {\"Instance-ID\": instance.id, \"Region\": region}}\n attachment.attach_button(button)\n\n message.message_text = \"Instances Running are:\"\n return message.to_json()", "def getInstancesD(region):\n instances = getInstances(region)\n instancesDicts = {\"id\": i.id,\n \"KEEP-tag\": getKeepTag(i),\n \"instance_type\": i.instance_type,\n \"state\": i.state,\n \"launch_time\": 
i.launch_time,\n \"security_groups\": getGroups(i),\n \"region\": i.region.name,\n \"PROD\": isProduction(i)\n }", "def share_replicas_get_all(context, with_share_data=False,\n with_share_server=True, session=None):\n session = session or get_session()\n\n result = _share_replica_get_with_filters(\n context, with_share_server=with_share_server, session=session).all()\n\n if with_share_data:\n result = _set_instances_share_data(context, result, session)\n\n return result", "def get_instances_in_instance_group(\n self, name, zone, max_results=None, page_token=None):\n params = {}\n if max_results:\n params['maxResults'] = max_results\n if page_token:\n params['pageToken'] = page_token\n return self.call_api(\n '/zones/%s/instanceGroups/%s/listInstances' % (zone, name),\n method='POST',\n params=params,\n )", "def xtest_instance_api_negative(self):\n\n # Test creating a db instance.\n LOG.info(\"* Creating db instance\")\n body = r\"\"\"\n {\"instance\": {\n \"name\": \"dbapi_test\",\n \"flavorRef\": \"medium\",\n \"port\": \"3306\",\n \"dbtype\": {\n \"name\": \"mysql\",\n \"version\": \"5.5\"\n }\n }\n }\"\"\"\n\n req = httplib2.Http(\".cache\")\n resp, content = req.request(API_URL + \"instances\", \"POST\", body, AUTH_HEADER)\n LOG.debug(content)\n content = json.loads(content)\n LOG.debug(resp)\n LOG.debug(content)\n\n self.instance_id = content['instance']['id']\n LOG.debug(\"Instance ID: %s\" % self.instance_id)\n\n # Assert 1) that the request was accepted and 2) that the response\n # is in the expected format.\n self.assertEqual(201, resp.status)\n self.assertTrue(content.has_key('instance'))\n\n\n # Test creating an instance without a body in the request.\n LOG.info(\"* Creating an instance without a body\")\n resp, content = req.request(API_URL + \"instances\", \"POST\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n\n # Test creating an instance with a malformed body.\n LOG.info(\"* Creating an instance with a malformed body\")\n resp, content = req.request(API_URL + \"instances\", \"POST\", r\"\"\"{\"instance\": {}}\"\"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(500, resp.status)\n \n # Test listing all db instances with a body in the request.\n LOG.info(\"* Listing all db instances with a body\")\n resp, content = req.request(API_URL + \"instances\", \"GET\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n \n # Test getting a specific db instance with a body in the request.\n LOG.info(\"* Getting instance %s with a body in the request\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id, \"GET\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n\n # Test getting a non-existent db instance.\n LOG.info(\"* Getting dummy instance\")\n resp, content = req.request(API_URL + \"instances/dummy\", \"GET\", \"\", AUTH_HEADER)\n content = json.loads(content)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n\n # Test immediately resetting the password on a db instance with a body in the request.\n LOG.info(\"* Resetting password on instance %s with a body in the request\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id + \"/resetpassword\", \"POST\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n \n\n # Test resetting the password on a db 
instance for a non-existent instance\n LOG.info(\"* Resetting password on dummy instance\")\n resp, content = req.request(API_URL + \"instances/dummy/resetpassword\", \"POST\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n \n # Test restarting a db instance for a non-existent instance\n LOG.info(\"* Restarting dummy instance\")\n resp, content = req.request(API_URL + \"instances/dummy/restart\", \"POST\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n\n # Test immediately restarting a db instance with a body in the request.\n LOG.info(\"* Restarting instance %s\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id + \"/restart\", \"POST\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n\n # Test deleting an instance with a body in the request.\n LOG.info(\"* Testing delete of instance %s with a body in the request\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id, \"DELETE\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n # Test that trying to delete an already deleted instance returns\n # the proper error code.\n LOG.info(\"* Testing re-delete of instance %s\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id, \"DELETE\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)", "def do_show(self, *args):\n args = [ele for ele in args[0].split(' ')]\n if args[0] == '':\n print(\"** class name missing **\")\n return\n if args[0] not in self.list_classes:\n print(\"** class doesn't exist **\")\n return\n if len(args) != 2:\n print(\"** instance id missing **\")\n return\n\n storage.reload()\n dict_objs = storage.all()\n if dict_objs is None or dict_objs == []:\n print(\"** no instance found **\")\n return\n\n key = \"{}.{}\".format(args[0], args[1])\n if key in dict_objs.keys():\n print(dict_objs[key])\n else:\n print(\"** no instance found **\")", "def get_fully_solved_instances(self, db):\n numInstances = db.session.query(db.Instance).options(joinedload_all('properties')) \\\n .filter(db.Instance.experiments.contains(self)).distinct().count()\n if numInstances == 0: return 0\n num_jobs_per_instance = db.session.query(db.ExperimentResult) \\\n .filter_by(experiment=self).count() / numInstances\n instances = []\n for i in self.instances:\n if db.session.query(db.ExperimentResult) \\\n .filter(db.ExperimentResult.resultCode.like('1%')) \\\n .filter_by(experiment=self, instance=i, status=1) \\\n .count() == num_jobs_per_instance:\n instances.append(i)\n return instances", "def Get_Running_Instances():\n ec2 = boto3.resource('ec2') \n #call the features resource from the boto3 library\n instances = ec2.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': ['pending', 'running',]},])\n #filter the instances returned using the state name\n #you can also filter using Tags by adding the filters: \n #[{'Name': 'tag-key', 'Values': ['Role','Name',]}, {'Name': 'tag-value', 'Values': ['*test*', '*TEST*',]},]\n return [instance.id for instance in instances]\n #return a liste with the ids of the instances", "def list_images(self):\n \n logging.debug(\"list_images entered for %s\" % self.machine_name) \n snapshots = cs.list_snapshots()\n res = []\n server_id = self.cloudserver.id\n # find the one for this server\n for 
snapshot in snapshots:\n img = snapshot.metadata.get(\"instance_uuid\", None)\n # print img\n\n if img == server_id:\n print \"Server %s has snapshot %s\" % (server_id, img)\n res.append(img)\n\n return res", "def instances(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"instances\")", "def instances(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"instances\")", "def _get_running_ec2_instances(theargs):\n mapstr = ''\n if theargs.profile is not None:\n boto3.setup_default_session(profile_name=theargs.profile)\n ec2 = boto3.client('ec2', region_name='us-west-2')\n\n response = ec2.describe_regions()\n for region in response['Regions']:\n rname = region['RegionName']\n sys.stdout.write('Running ec2 query in region: ' + rname + '\\n')\n ec2 = boto3.client('ec2', region_name=rname)\n mapstr += 'Region: ' + rname + '\\n'\n respy = ec2.describe_instances()\n for reso in respy['Reservations']:\n for entry in reso['Instances']:\n namey = ''\n try:\n for keyval in entry['Tags']:\n if keyval['Key'] == 'Name':\n namey = keyval['Value']\n break\n except KeyError:\n pass\n\n mapstr += ('\\t\\t' + entry['PublicDnsName'] + '\\n' +\n '\\t\\tLaunch Date: ' + str(entry['LaunchTime']) +\n '\\n' + \n '\\t\\tId: ' + entry['InstanceId'] + '\\n' +\n '\\t\\tType: ' + entry['InstanceType'] + '\\n' +\n '\\t\\tName: ' + namey + '\\n' +\n '\\t\\tState: ' + entry['State']['Name'] + '\\n\\n')\n sys.stdout.write('\\nResults:\\n\\n')\n return mapstr", "def get_instance_classes():\n return Base_Instance.get_instance_classes()", "def instances(self, endpoint_name=None):\n if endpoint_name is None:\n _, body = self.request('/v1.1/endpoint/instances', 'GET')\n else:\n _, body = self.request('/v1.1/endpoints/%s/instances' % endpoint_name, 'GET')\n return body", "def List(self, zone):\n project = properties.VALUES.core.project.Get(required=True)\n request = self.messages.ComputeInstancesListRequest(\n zone=zone, project=project)\n instances = list_pager.YieldFromList(\n service=self.client.instances,\n request=request,\n method='List',\n field='items')\n\n result_set = []\n for instance in instances:\n if self._VMCreatedByExecGroup(instance):\n result_set.append(instance)\n\n return result_set", "async def describe_dbinstances_with_options_async(\n self,\n request: dds_20151201_models.DescribeDBInstancesRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstancesResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.charge_type):\n query['ChargeType'] = request.charge_type\n if not UtilClient.is_unset(request.connection_domain):\n query['ConnectionDomain'] = request.connection_domain\n if not UtilClient.is_unset(request.dbinstance_class):\n query['DBInstanceClass'] = request.dbinstance_class\n if not UtilClient.is_unset(request.dbinstance_description):\n query['DBInstanceDescription'] = request.dbinstance_description\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.dbinstance_status):\n query['DBInstanceStatus'] = request.dbinstance_status\n if not UtilClient.is_unset(request.dbinstance_type):\n query['DBInstanceType'] = request.dbinstance_type\n if not UtilClient.is_unset(request.dbnode_type):\n query['DBNodeType'] = request.dbnode_type\n if not UtilClient.is_unset(request.engine):\n query['Engine'] = request.engine\n if not UtilClient.is_unset(request.engine_version):\n 
query['EngineVersion'] = request.engine_version\n if not UtilClient.is_unset(request.expire_time):\n query['ExpireTime'] = request.expire_time\n if not UtilClient.is_unset(request.expired):\n query['Expired'] = request.expired\n if not UtilClient.is_unset(request.network_type):\n query['NetworkType'] = request.network_type\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.page_number):\n query['PageNumber'] = request.page_number\n if not UtilClient.is_unset(request.page_size):\n query['PageSize'] = request.page_size\n if not UtilClient.is_unset(request.region_id):\n query['RegionId'] = request.region_id\n if not UtilClient.is_unset(request.replication_factor):\n query['ReplicationFactor'] = request.replication_factor\n if not UtilClient.is_unset(request.resource_group_id):\n query['ResourceGroupId'] = request.resource_group_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.tag):\n query['Tag'] = request.tag\n if not UtilClient.is_unset(request.v_switch_id):\n query['VSwitchId'] = request.v_switch_id\n if not UtilClient.is_unset(request.vpc_id):\n query['VpcId'] = request.vpc_id\n if not UtilClient.is_unset(request.zone_id):\n query['ZoneId'] = request.zone_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstances',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstancesResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def test_instance_api(self):\n\n # Test creating a db instance.\n # ----------------------------\n LOG.info(\"* Creating db instance\")\n body = r\"\"\"\n {\"instance\": {\n \"name\": \"%s\",\n \"flavorRef\": \"103\",\n \"port\": \"3306\",\n \"dbtype\": {\n \"name\": \"mysql\",\n \"version\": \"5.5\"\n }\n }\n }\"\"\" % INSTANCE_NAME\n\n client = httplib2.Http(\".cache\", timeout=TIMEOUTS['http'], disable_ssl_certificate_validation=True)\n resp, content = self._execute_request(client, \"instances\", \"POST\", body)\n\n # Assert 1) that the request was accepted and 2) that the response\n # is in the expected format.\n self.assertEqual(201, resp.status, (\"Expecting 201 as response status of create instance but received %s\" % resp.status))\n content = self._load_json(content,'Create Instance')\n self.assertTrue(content.has_key('instance'), \"Response body of create instance does not have 'instance' field\")\n\n credential = content['instance']['credential']\n\n self.instance_id = content['instance']['id']\n LOG.debug(\"Instance ID: %s\" % self.instance_id)\n\n\n # Test listing all db instances.\n # ------------------------------\n LOG.info(\"* Listing all db instances\")\n resp, content = self._execute_request(client, \"instances\", \"GET\", \"\")\n \n # Assert 1) that the request was accepted and 2) that the response is\n # in the expected format (e.g. 
a JSON object beginning with an\n # 'instances' key).\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of list instance but received %s\" % resp.status))\n content = self._load_json(content,'List all Instances')\n self.assertTrue(content.has_key('instances'), \"Response body of list instances does not contain 'instances' field.\")\n\n\n # Test getting a specific db instance.\n # ------------------------------------\n LOG.info(\"* Getting instance %s\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances/\" + self.instance_id, \"GET\", \"\")\n \n # Assert 1) that the request was accepted and 2) that the returned\n # instance is the same as the accepted instance.\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of show instance but received %s\" % resp.status))\n content = self._load_json(content,'Get Single Instance')\n self.assertEqual(self.instance_id, str(content['instance']['id']), \"Instance ID not found in Show Instance response\")\n\n\n # Check to see if the instance we previously created is \n # in the 'running' state\n # -----------------------------------------------------\n wait_so_far = 0\n status = content['instance']['status']\n pub_ip = content['instance']['hostname']\n while status != 'running' or pub_ip is None or len(pub_ip) <= 0:\n # wait a max of max_wait for instance status to show running\n time.sleep(POLL_INTERVALS['boot'])\n wait_so_far += POLL_INTERVALS['boot']\n if wait_so_far >= TIMEOUTS['boot']:\n break\n \n resp, content = self._execute_request(client, \"instances/\" + self.instance_id, \"GET\", \"\")\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of show instance but received %s\" % resp.status))\n content = self._load_json(content,'Get Single Instance')\n status = content['instance']['status']\n pub_ip = content['instance']['hostname']\n\n if status != 'running':\n\n self.fail(\"for some reason the instance did not switch to 'running' in %s\" % TIMEOUT_STR)\n else:\n # try to connect to mysql instance\n pub_ip = content['instance']['hostname']\n # user/pass = credentials\n db_user = credential['username']\n db_passwd = credential['password']\n db_name = 'mysql'\n\n LOG.info(\"* Trying to connect to mysql DB on first boot: %s, %s, %s\" %(db_user, db_passwd, pub_ip))\n conn = self.db_connect(db_user, db_passwd, pub_ip, db_name)\n if conn is None:\n self.fail(\"* maximum trials reached, db connection failed on first boot over %s: \" % pub_ip)\n conn.close()\n\n\n\n # Test resetting the password on a db instance.\n # ---------------------------------------------\n LOG.info(\"* Resetting password on instance %s\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances/\" + self.instance_id +\"/resetpassword\", \"POST\", \"\")\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of reset password but received %s\" % resp.status))\n content = self._load_json(content,'Get new password')\n\n if resp.status == 200 :\n db_new_passwd = content['password']\n LOG.info(\"* Trying to connect to mysql DB after resetting password: %s, %s, %s\" %(db_user, db_new_passwd, pub_ip))\n conn = self.db_connect(db_user, db_new_passwd, pub_ip, db_name)\n if conn is None:\n LOG.exception(\"* something is wrong with mysql connection after resetting password\")\n conn.close()\n LOG.info(\"* Maybe the old password still works ?\")\n conn_2 = self.db_connect(db_user, db_passwd, pub_ip, db_name)\n if conn_2 is None:\n LOG.exception(\"* no, old 
password does not work anymore\")\n else:\n LOG.info(\"* old password still works, new password has not kicked in\")\n conn_2.close()\n self.fail(\"* maximum trials reached, db connection failed after resetting password over %s: \" % pub_ip)\n\n\n # XXX: Suspect restarting too soon after a \"reset password\" command is putting the instance in a bad mood on restart\n time.sleep(DELAYS['between_reset_and_restart'])\n\n # Test restarting a db instance.\n # ------------------------------\n LOG.info(\"* Restarting instance %s\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances/\" + self.instance_id +\"/restart\", \"POST\", \"\")\n self.assertEqual(204, resp.status, (\"Expecting 204 as response status of restart instance but received %s\" % resp.status))\n\n # Test getting a specific db instance.\n LOG.info(\"* Getting instance %s\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances/\" + self.instance_id , \"GET\", \"\")\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of show instance but received %s\" % resp.status))\n content = self._load_json(content,'Get Single Instance after Restart')\n \n wait_so_far = 0\n status = content['instance']['status']\n while status != 'running':\n # wait a max of max_wait for instance status to show running\n time.sleep(POLL_INTERVALS['boot'])\n wait_so_far += POLL_INTERVALS['boot']\n if wait_so_far >= TIMEOUTS['boot']:\n break\n \n resp, content = self._execute_request(client, \"instances/\" + self.instance_id , \"GET\", \"\")\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of show instance but received %s\" % resp.status))\n content = self._load_json(content,'Get Single Instance')\n status = content['instance']['status']\n\n if status != 'running':\n self.fail(\"Instance %s did not go to running after a reboot and waiting %s\" % (self.instance_id, TIMEOUT_STR))\n else:\n # try to connect to mysql instance\n time.sleep(DELAYS['between_reboot_and_connect'])\n LOG.info(\"* Trying to connect to mysql DB after rebooting the instance: %s, %s, %s\" %(db_user, db_new_passwd, pub_ip))\n\n conn = self.db_connect(db_user, db_new_passwd, pub_ip, db_name)\n if conn is None:\n self.fail(\"* maximum trials reached, db connection failed after rebooting instance over %s: \" % pub_ip)\n conn.close()\n\n # Test deleting a db instance.\n # ----------------------------\n LOG.info(\"* Deleting instance %s\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances/\" + self.instance_id , \"DELETE\", \"\")\n\n # Assert 1) that the request was accepted and 2) that the instance has\n # been deleted.\n self.assertEqual(204, resp.status, \"Response status of instance delete did not return 204\")\n\n LOG.debug(\"Verifying that instance %s has been deleted\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances\", \"GET\", \"\")\n \n if not content:\n pass\n else:\n content = json.loads(content)\n for each in content['instances']:\n self.assertFalse(each['id'] == self.instance_id, (\"Instance %s did not actually get deleted\" % self.instance_id))\n\n LOG.debug(\"Sleeping...\")\n time.sleep(DELAYS['after_delete'])", "def get_solved_instances(self, db):\n instance_ids = [i[0] for i in db.session.query(db.ExperimentResult.Instances_idInstance) \\\n .filter_by(experiment=self).filter(db.ExperimentResult.resultCode.like('1%')) \\\n .filter_by(status=1).distinct().all()]\n return 
db.session.query(db.Instance).filter(db.Instance.idInstance.in_(instance_ids)).all()" ]
[ "0.671852", "0.66249394", "0.6588612", "0.6422866", "0.63801897", "0.63176703", "0.6232645", "0.6174557", "0.61549586", "0.60962325", "0.609501", "0.60368335", "0.6020968", "0.588904", "0.58728695", "0.5816922", "0.5762505", "0.57482815", "0.5741197", "0.5740232", "0.5729422", "0.57188135", "0.5703127", "0.5700055", "0.5691072", "0.5681384", "0.5678996", "0.56739885", "0.56680644", "0.56621283", "0.5661296", "0.5652749", "0.56473154", "0.562115", "0.5612491", "0.5587096", "0.5583506", "0.5575814", "0.5558656", "0.5549993", "0.55440927", "0.5489638", "0.54750806", "0.5466568", "0.5465829", "0.5425087", "0.5421519", "0.5411473", "0.54086393", "0.5404777", "0.5400476", "0.5391107", "0.538625", "0.5381959", "0.53768504", "0.5374085", "0.5356266", "0.533069", "0.5328725", "0.52996844", "0.52891374", "0.5269341", "0.5235138", "0.5234785", "0.5232186", "0.52301174", "0.5225091", "0.5212418", "0.52091646", "0.5205446", "0.5196384", "0.5195692", "0.5194051", "0.51843953", "0.516599", "0.51555365", "0.51466244", "0.5143036", "0.51370186", "0.51229084", "0.5120765", "0.5114662", "0.511296", "0.5079378", "0.5070808", "0.50643075", "0.50572944", "0.5056894", "0.50562674", "0.504948", "0.50478494", "0.50366277", "0.50366277", "0.50348043", "0.5033294", "0.50299793", "0.5028394", "0.5020169", "0.5019296", "0.50183576" ]
0.6075133
11
The list of replica set and standalone instances is returned when the DBInstanceType parameter is set to its default value, replicate. To query the list of sharded cluster instances, you must set the DBInstanceType parameter to sharding.
async def describe_dbinstances_async( self, request: dds_20151201_models.DescribeDBInstancesRequest, ) -> dds_20151201_models.DescribeDBInstancesResponse: runtime = util_models.RuntimeOptions() return await self.describe_dbinstances_with_options_async(request, runtime)
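The snippet below is a minimal usage sketch for the async method above, not taken from the source: it assumes the standard Alibaba Cloud Python SDK v2 package layout (alibabacloud_dds20151201, alibabacloud_tea_openapi) plus placeholder credentials and endpoint, and it sets DBInstanceType to sharding so that sharded cluster instances are listed instead of the default replica set and standalone list.

```python
# Minimal usage sketch (assumptions: package layout, endpoint, credentials).
import asyncio

from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_dds20151201.client import Client


async def list_sharded_instances() -> None:
    config = open_api_models.Config(
        access_key_id='<your-access-key-id>',          # placeholder
        access_key_secret='<your-access-key-secret>',  # placeholder
        endpoint='mongodb.aliyuncs.com',               # assumed public endpoint
    )
    client = Client(config)

    # DBInstanceType defaults to 'replicate' (replica set and standalone
    # instances); set it to 'sharding' to list sharded cluster instances.
    request = dds_20151201_models.DescribeDBInstancesRequest(
        region_id='cn-hangzhou',
        dbinstance_type='sharding',
        page_number=1,
        page_size=30,
    )
    response = await client.describe_dbinstances_async(request)
    print(response.body)


if __name__ == '__main__':
    asyncio.run(list_sharded_instances())
```

The request model and its snake_case fields (region_id, dbinstance_type, page_number, page_size) come from the source code itself; only the client construction details are assumed.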
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_instances(cls, args, config):\n instance_list = config.get_all_instances()\n if len(instance_list) > 0:\n table_data = []\n for i in instance_list:\n provider_obj = config.get_object_by_id(i.provider_id, 'Provider')\n if provider_obj is None:\n continue\n provider_name = provider_obj.name\n #print \"provider_obj.type\",provider_obj.type\n if i.worker_group_id is not None:\n name = config.get_object_by_id(i.worker_id, 'WorkerGroup').name\n itype = 'worker'\n else:\n name = config.get_object_by_id(i.controller_id, 'Controller').name\n itype = 'controller'\n table_data.append([i.id, provider_name, i.provider_instance_identifier, itype, name])\n table_print(['ID', 'provider', 'instance id', 'type', 'name'], table_data)\n else:\n print \"No instance found\"", "def getinstancelist():\n dbcursor_dict.execute(dbq.get_all_instance_list, )\n db_instance_list = dbcursor_dict.fetchall()\n return db_instance_list", "def list_(args):\n\n # Get Config.py\n cloud = get_current_cloud(args.cloud)\n\n instances = cloud.list_instances()\n print_table(print_instance_summary, headers, instances,\n use_color=args.color)\n return instances", "def list_instances(self):\n # list instances\n self._list_instances()", "def list_instances(self):\n print '# AWS EC2 instances'\n self.compute.list_instances()", "def list_instances():\n if request.method == \"GET\":\n return render_template(\"instances.html\")", "def show_instances():\n return get_instances()", "def list_instances(self):\n LOG.debug(\"list_instances\")\n\n instance_ids = []\n bmms = db.bmm_get_all(None)\n for bmm in bmms:\n if not bmm[\"instance_id\"]:\n continue\n instance_ids.append(self._instance_id_to_name(bmm[\"instance_id\"]))\n\n return instance_ids", "def list_instances(self):\n\n response = self.client.service.instances().aggregatedList(\n project=self.client.project_id).execute()\n\n zones = response.get('items', {})\n instances = []\n for zone in zones.values():\n for instance in zone.get('instances', []):\n instances.append(instance)\n\n return instances", "def list_instances():\n print(str_sep)\n print(\"Listing EC2 instances from [{}] region.\".format(SESSION.region_name))\n print(\"{:20s}{:15s}{:10s}{}\".format(\"ID\", \"TYPE\", \"STATE\", \"NAME\"))\n print(str_sep)\n\n try:\n for instance in EC2_MANAGER.list_instances():\n # get the instance name in the tags list\n name = next((item for item in instance.tags if item[\"Key\"] == \"Name\"),\n {'Key': 'Name', 'Value': 'None'})\n\n print(\"{:20s}{:15s}{:10s}{}\".format(instance.id,\n instance.instance_type,\n instance.state['Name'],\n name['Value']))\n except ClientError as e:\n ErrManager.err_manager(e)\n\n print(str_sep)", "def get_dbservers(self):\n ret = []\n for i in self.all_instances:\n if i.is_dbserver():\n ret.append(i)\n return ret", "def describe_dbinstances(\n self,\n request: dds_20151201_models.DescribeDBInstancesRequest,\n ) -> dds_20151201_models.DescribeDBInstancesResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_dbinstances_with_options(request, runtime)", "def list(self, instance, limit=None, marker=None):\n return self._list(\"/instances/%s/databases\" % base.getid(instance),\n \"databases\", limit, marker)", "def get_instance_list():\n return parse_list_output(Popen('nova list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])", "def get_instances() -> dict:\n url = f\"{app.config['COMPUTE_SERVERS_REF']}/detail\"\n instances_rq = request(\n method=\"GET\", url=url, headers=build_header(), params={\"vm_state\": 
\"active\"},\n )\n\n if not instances_rq.ok:\n HTTPError(instances_rq.status_code)\n\n answer = {\"servers\": list()}\n for instance in instances_rq.json()[\"servers\"]:\n instance_info = dict(name=instance[\"name\"])\n instance_info[\"ip_addresses\"] = list()\n for network, info in instance[\"addresses\"].items():\n instance_info[\"ip_addresses\"].extend(entry[\"addr\"] for entry in info)\n answer[\"servers\"].append(instance_info)\n\n return answer", "def list_aws_instances(verbose=False, state='all'):\n conn = get_ec2_connection()\n\n reservations = conn.get_all_reservations()\n instances = []\n for res in reservations:\n for instance in res.instances:\n if state == 'all' or instance.state == state:\n instance = {\n 'id': instance.id,\n 'type': instance.instance_type,\n 'image': instance.image_id,\n 'state': instance.state,\n 'instance': instance,\n }\n instances.append(instance)\n env.instances = instances\n if verbose:\n import pprint\n pprint.pprint(env.instances)", "def list_instances():\n js = _get_jetstream_conn()\n il = js.compute.instances.list()\n if not il:\n msg = \"You don't have any instances available.\"\n else:\n msg = (\"You have {0} instances available. Here are up to 3 most \"\n \"recent: \".format(len(il)))\n msg_ex = \"\"\n content = \"\"\n for i in il[:3]:\n msg_ex += \"{0},\".format(i.name)\n content += \"{0} ({1})\\n\".format(\n i.name, i.public_ips[0] if i.public_ips else i.private_ips[0])\n return statement(msg + msg_ex).simple_card(title=msg, content=content)", "def describe_dbinstances_overview(\n self,\n request: dds_20151201_models.DescribeDBInstancesOverviewRequest,\n ) -> dds_20151201_models.DescribeDBInstancesOverviewResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_dbinstances_overview_with_options(request, runtime)", "def list_running_instances(self):\n print '# Running AWS EC2 instances'\n self.compute.list_running_instances()", "def instances(self, **query):\n return self._list(_instance.Instance, **query)", "def list_databases(self, instance, limit=None, marker=None):\n return instance.list_databases(limit=limit, marker=marker)", "def describe_rds_instances(rds, account, region, output_bucket):\n rds_list = rds.describe_db_instances().get('DBInstances')\n\n for rds_obj in rds_list:\n #print rds_obj\n output_bucket.append(misc.format_line((\n misc.check_if(account.get('name')),\n misc.check_if(region.get('RegionName')),\n misc.check_if(rds_obj.get('DBSubnetGroup').get('VpcId')),\n misc.check_if(rds_obj.get('DBInstanceIdentifier')),\n misc.check_if(rds_obj.get('DBInstanceClass')),\n misc.check_if(str(rds_obj.get('PubliclyAccessible'))),\n misc.check_if(rds_obj.get('Endpoint').get('Address')),\n misc.lookup(rds_obj.get('Endpoint').get('Address')),\n misc.check_if(str(rds_obj.get('Endpoint').get('Port')))\n )))", "def show_all_instances(self):\n if not self.all_instances:\n logging.error(\"%s: no instances detected\", self.name)\n return\n instances = \"\"\n for instance in self.all_instances:\n instances += \" - {0.name} (pid: {0.pid})\".format(instance)\n logging.info(\"arangod instances for starter: %s - %s\", self.name, instances)", "def instance_list(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"instance_list\")", "def list_instances(self):\n instances = []\n try:\n pages = self.compute.virtual_machines.list(\n CONF.azure.resource_group)\n except Exception as e:\n msg = six.text_type(e)\n LOG.exception(msg)\n ex = exception.InstanceListFailure(reason=six.text_type(e))\n raise ex\n else:\n if pages:\n for i 
in pages:\n instances.append(i.name)\n return instances", "def list_ebss_by_instance():\n\n ec2 = u.create_ec2_resource()\n instances = [(u.seconds_from_datetime(i.launch_time), i) for i in ec2.instances.all()]\n sorted_instances = sorted(instances, key=itemgetter(0))\n\n for (seconds, instance) in sorted_instances:\n\n volumes = instance.volumes.all()\n volume_strs = []\n for v in volumes:\n volume_strs.append(\"%s (%s)\"%(v.id, v.size))\n print(\"%s: %s\" % (u.get_name(instance.tags), ','.join(volume_strs)))", "def list_instance_uuids(self):\n return self.list_instances()", "def index(self, req, instance_id):\n LOG.info(\"Call to Databases index - %s\", instance_id)\n LOG.debug(\"%s - %s\", req.environ, req.body)\n local_id = dbapi.localid_from_uuid(instance_id)\n ctxt = req.environ['nova.context']\n common.instance_available(ctxt, instance_id, local_id, self.compute_api)\n try:\n result = self.guest_api.list_databases(ctxt, local_id)\n except Exception as err:\n LOG.error(err)\n raise exception.InstanceFault(\"Unable to get the list of databases\")\n LOG.debug(\"LIST DATABASES RESULT - %s\", str(result))\n databases = {'databases':[]}\n for database in result:\n mysql_database = models.MySQLDatabase()\n mysql_database.deserialize(database)\n databases['databases'].append({'name': mysql_database.name})\n LOG.debug(\"LIST DATABASES RETURN - %s\", databases)\n return databases", "def _getAllRunningInstances(self):\n return self._ec2.get_only_instances(filters={\n 'tag:leader_instance_id': self._instanceId,\n 'instance-state-name': 'running'})", "def list_as_instances(access_key, secret_key, region, autoscaling_group):\n\n aws_as = init_aws_as_conn(access_key, secret_key, region)\n aws_ec2 = init_aws_ec2_conn(access_key, secret_key, region)\n autoscaling_instances = []\n\n vm = aws_as.get_all_groups([autoscaling_group])\n autoscaling_instances_id = [j.instance_id for i in vm for j in i.instances]\n\n for instance_id in autoscaling_instances_id:\n vm = boto.ec2.instance.Instance(aws_ec2)\n vm.id = instance_id\n vm.update()\n autoscaling_instances.append(vm)\n\n return autoscaling_instances", "def query_db_cluster(instance_id):\n try:\n response = RDS.describe_db_instances(\n DBInstanceIdentifier=instance_id\n )\n return response['DBInstances'][0]['DBClusterIdentifier']\n except KeyError:\n db_subnet = response['DBInstances'][0]['DBSubnetGroup']['DBSubnetGroupName']\n return [False, db_subnet]", "def databases(request): # pylint: disable=unused-argument\n response = InstancesAccess.get_all()\n # By default, JsonResponse refuse to serialize a list to a Json list. safe=False allow it.\n return JsonResponse(response, safe=False)", "def databases(self, instance, **query):\n instance = self._get_resource(_instance.Instance, instance)\n return self._list(_database.Database, instance_id=instance.id, **query)", "def list_instances_detail(self):\n\n # TODO(imsplitbit): need to ask around if this is the best way to do\n # this. This causes some redundant vzlist commands as get_info is run\n # on every item returned from this command but it didn't make sense\n # to re-implement get_info as get_info_all.\n infos = []\n try:\n # get a list of CT names which will be nova friendly.\n # NOTE: This can be an issue if nova decides to change\n # the format of names. 
We would need to have a migration process\n # to change the names in the name field of the CTs.\n out, err = utils.execute('sudo', 'vzlist', '--all', '-o',\n 'name', '-H')\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Problem listing Vzs')\n\n for name in out.splitlines():\n name = name.split()[0]\n status = self.get_info(name)\n infos.append(driver.InstanceInfo(name, status['state']))\n\n return infos", "def instances(self):\n return self.get('instances')", "def list_instances(self):\n nodes = self._driver.list_nodes()\n return [[n.name, n.state, n.public_ips] for n in nodes]", "def do_instance_show(cs, args):\n try:\n instance = cs.instances.detail(args.instance)\n except exceptions.NotFound as e:\n msg = \"No server with an id of '%s' exists\" % args.instance\n e.message = msg\n raise\n\n _print_server_details(instance)", "def instances(self):\r\n # It would be more efficient to do this with filters now\r\n # but not all services that implement EC2 API support filters.\r\n instances = []\r\n rs = self.connection.get_all_instances()\r\n for reservation in rs:\r\n uses_group = [g.name for g in reservation.groups if g.name == self.name]\r\n if uses_group:\r\n instances.extend(reservation.instances)\r\n return instances", "def GetInstances(self, bulk=False, reason=None):\n query = []\n _AppendIf(query, bulk, (\"bulk\", 1))\n _AppendReason(query, reason)\n\n instances = self._SendRequest(HTTP_GET,\n \"/%s/instances\" % GANETI_RAPI_VERSION,\n query, None)\n if bulk:\n return instances\n else:\n return [i[\"id\"] for i in instances]", "def describe_rds_db_instances(StackId=None, RdsDbInstanceArns=None):\n pass", "def listInstancesRegionZone(region,zone):\n\tprint \"-\"*80\n\tprint \"# Region :\",region,\" Zone\", zone\t\n\tprint \"-\"*80\n\tinstances = getInstancesRegionZone(region,zone)\n\tif instances:\n\t\tfor instance in instances:\n\t\t\tprint \"[\",instance.ami_launch_index,\"]\",instance.ip_address,\" (\",instance.private_ip_address,\") \",instance.instance_type,\" key=\",instance.key_name", "def describe_instances():\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Describe instances\n instances = ec2_resource.instances.all()\n for instance in instances:\n print('State of the instance \"' + instance.id + '\" is: \"' + instance.state['Name'] + '\"')\n return", "def list_instances_detail(self, context):\n LOG.debug(\"list_instances_detail\")\n\n info_list = []\n bmms = db.bmm_get_all_by_instance_id_not_null(context)\n for bmm in bmms:\n instance = db.instance_get(context, bmm[\"instance_id\"])\n status = PowerManager(bmm[\"ipmi_ip\"]).status()\n if status == \"off\":\n inst_power_state = power_state.SHUTOFF\n\n if instance[\"vm_state\"] == vm_states.ACTIVE:\n db.instance_update(context, instance[\"id\"], {\"vm_state\": vm_states.STOPPED})\n else:\n inst_power_state = power_state.RUNNING\n\n if instance[\"vm_state\"] == vm_states.STOPPED:\n db.instance_update(context, instance[\"id\"], {\"vm_state\": vm_states.ACTIVE})\n\n info_list.append(driver.InstanceInfo(self._instance_id_to_name(bmm[\"instance_id\"]), \n inst_power_state))\n\n return info_list", "def instances(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstanceStatusArgs']]]]:\n return pulumi.get(self, \"instances\")", "def get_instances(self):\n connection = self.connection\n\n instances = []\n\n connection.row_factory = sqlite3.Row\n cur = connection.cursor()\n cur.execute(\"SELECT * FROM 
INSTANCES\")\n rows = cur.fetchall()\n columns = [str(i[0]).lower() for i in cur.description]\n for row in rows:\n object = dict(zip(columns, row))\n instances.append(object)\n\n instancesNoneDict = {}\n\n for instance in instances:\n if instance['harvesterid'] not in instancesNoneDict:\n instancesNoneDict[instance['harvesterid']] = {}\n if instance['harvesterhost'] not in instancesNoneDict[instance['harvesterid']]:\n instancesNoneDict[instance['harvesterid']][instance['harvesterhost']] = {\n 'availability': instance['availability'], 'errorsdesc': instance['errorsdesc'],\n 'contacts': instance['contacts'].split(','),\n 'active': instance['active'], 'notificated': instance['notificated']}\n elif instance['harvesterid'] in instancesNoneDict:\n if instance['harvesterhost'] not in instancesNoneDict[instance['harvesterid']]:\n instancesNoneDict[instance['harvesterid']][instance['harvesterhost']] = {\n 'availability': instance['availability'], 'errorsdesc': instance['errorsdesc'],\n 'contacts': instance['contacts'].split(','),\n 'active': instance['active'], 'notificated': instance['notificated']}\n if 'none' in instancesNoneDict[instance['harvesterid']]:\n del instancesNoneDict[instance['harvesterid']]['none']\n return instancesNoneDict", "def do_instance_list(cs, args):\n instances = cs.instances.list()\n\n fields = [\"OCCI ID\"]\n if args.detailed:\n fields.extend([\"Name\", \"State\", \"Network\"])\n occi_attrs = (\"occi.compute.hostname\",\n \"occi.compute.state\")\n\n pt = prettytable.PrettyTable([f for f in fields], caching=False)\n pt.align = 'l'\n\n for instance in instances:\n row = []\n attrs = instance.get('attributes', {})\n instance_id = attrs.get('occi.core.id', None)\n row.append(instance_id)\n\n if args.detailed and instance_id:\n if not all([i in attrs for i in occi_attrs]):\n instance = cs.instances.detail(instance_id)\n attrs = instance.get('attributes', {})\n\n name = attrs.get(\"occi.core.title\", None)\n if name is None:\n name = attrs.get(\"occi.compute.hostname\", None)\n row.append(name)\n row.append(attrs.get(\"occi.compute.state\", None))\n\n links = instance.get(\"links\", [])\n network = []\n for link in links:\n if occi.CATEGORIES[\"network\"] in link[\"kind\"][\"related\"]:\n # get IPv4\n ip = link[\"attributes\"].get(\n \"occi.networkinterface.address\",\n None\n )\n if not ip:\n ip = link[\"attributes\"].get(\n \"occi.networkinterface.ip6\",\n None\n )\n network.append(ip)\n row.append(network)\n\n pt.add_row(row)\n\n print(pt.get_string())", "def list_instances(self):\n instances = utils.list_instances(self.compute_client,\n drv_conf.resource_group)\n\n self._uuid_to_omni_instance.clear()\n instance_names = []\n for instance in instances:\n openstack_id = None\n if instance.tags and 'openstack_id' in instance.tags:\n openstack_id = instance.tags['openstack_id']\n if openstack_id is None:\n openstack_id = self._get_uuid_from_omni_id(instance.name)\n self._uuid_to_omni_instance[openstack_id] = instance\n instance_names.append(instance.name)\n return instance_names", "def list_instance_name():\n\n if request.method == \"GET\":\n with lock:\n names = list(instances.keys())\n return jsonify(names)\n return Response(status=200)", "def describe_dbinstances_with_options(\n self,\n request: dds_20151201_models.DescribeDBInstancesRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstancesResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.charge_type):\n query['ChargeType'] = 
request.charge_type\n if not UtilClient.is_unset(request.connection_domain):\n query['ConnectionDomain'] = request.connection_domain\n if not UtilClient.is_unset(request.dbinstance_class):\n query['DBInstanceClass'] = request.dbinstance_class\n if not UtilClient.is_unset(request.dbinstance_description):\n query['DBInstanceDescription'] = request.dbinstance_description\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.dbinstance_status):\n query['DBInstanceStatus'] = request.dbinstance_status\n if not UtilClient.is_unset(request.dbinstance_type):\n query['DBInstanceType'] = request.dbinstance_type\n if not UtilClient.is_unset(request.dbnode_type):\n query['DBNodeType'] = request.dbnode_type\n if not UtilClient.is_unset(request.engine):\n query['Engine'] = request.engine\n if not UtilClient.is_unset(request.engine_version):\n query['EngineVersion'] = request.engine_version\n if not UtilClient.is_unset(request.expire_time):\n query['ExpireTime'] = request.expire_time\n if not UtilClient.is_unset(request.expired):\n query['Expired'] = request.expired\n if not UtilClient.is_unset(request.network_type):\n query['NetworkType'] = request.network_type\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.page_number):\n query['PageNumber'] = request.page_number\n if not UtilClient.is_unset(request.page_size):\n query['PageSize'] = request.page_size\n if not UtilClient.is_unset(request.region_id):\n query['RegionId'] = request.region_id\n if not UtilClient.is_unset(request.replication_factor):\n query['ReplicationFactor'] = request.replication_factor\n if not UtilClient.is_unset(request.resource_group_id):\n query['ResourceGroupId'] = request.resource_group_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.tag):\n query['Tag'] = request.tag\n if not UtilClient.is_unset(request.v_switch_id):\n query['VSwitchId'] = request.v_switch_id\n if not UtilClient.is_unset(request.vpc_id):\n query['VpcId'] = request.vpc_id\n if not UtilClient.is_unset(request.zone_id):\n query['ZoneId'] = request.zone_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstances',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstancesResponse(),\n self.call_api(params, req, runtime)\n )", "def list(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n print(vmrun.listSnapshots())", "def _list_backups_for_instance(self, instance, marker=0, limit=20):\n uri = \"/%s/%s/backups?limit=%d&marker=%d\" % (self.uri_base,\n utils.get_id(instance),\n int(limit),\n int(marker))\n resp, resp_body = self.api.method_get(uri)\n mgr = self.api._backup_manager\n return 
[CloudDatabaseBackup(mgr, backup)\n for backup in resp_body.get(\"backups\")]", "def get_instances(cls):\n raise NotImplementedError", "def instances(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"instances\")", "def describe_dbinstances_overview_with_options(\n self,\n request: dds_20151201_models.DescribeDBInstancesOverviewRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstancesOverviewResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.charge_type):\n query['ChargeType'] = request.charge_type\n if not UtilClient.is_unset(request.engine_version):\n query['EngineVersion'] = request.engine_version\n if not UtilClient.is_unset(request.instance_class):\n query['InstanceClass'] = request.instance_class\n if not UtilClient.is_unset(request.instance_ids):\n query['InstanceIds'] = request.instance_ids\n if not UtilClient.is_unset(request.instance_status):\n query['InstanceStatus'] = request.instance_status\n if not UtilClient.is_unset(request.instance_type):\n query['InstanceType'] = request.instance_type\n if not UtilClient.is_unset(request.network_type):\n query['NetworkType'] = request.network_type\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.region_id):\n query['RegionId'] = request.region_id\n if not UtilClient.is_unset(request.resource_group_id):\n query['ResourceGroupId'] = request.resource_group_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.v_switch_id):\n query['VSwitchId'] = request.v_switch_id\n if not UtilClient.is_unset(request.vpc_id):\n query['VpcId'] = request.vpc_id\n if not UtilClient.is_unset(request.zone_id):\n query['ZoneId'] = request.zone_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstancesOverview',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstancesOverviewResponse(),\n self.call_api(params, req, runtime)\n )", "def list_rds(region, filter_by_kwargs):\n conn = boto.rds.connect_to_region(region)\n instances = conn.get_all_dbinstances()\n return lookup(instances, filter_by=filter_by_kwargs)", "def get_instances(self):\n for server in self.cs.servers.list():\n match = self.cluster_re.match(server.name)\n if match:\n for ip in server.networks['public']:\n if ip.count('.'):\n v4ip = ip\n yield (match.group('role'), v4ip)", "def get_instance_classes():\n return Base_Instance.instance_classes", "def InstancesMultiAlloc(self, instances, reason=None, **kwargs):\n query = []\n body = {\n \"instances\": instances,\n }\n self._UpdateWithKwargs(body, **kwargs)\n\n _AppendDryRunIf(query, kwargs.get(\"dry_run\"))\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_POST,\n \"/%s/instances-multi-alloc\" % GANETI_RAPI_VERSION,\n query, body)", "def list(self, instance=None, limit=20, marker=0):\n 
if instance is None:\n return super(CloudDatabaseBackupManager, self).list()\n return self.api._manager._list_backups_for_instance(instance, limit=limit,\n marker=marker)", "def get_all_instances(self, instance_ids=None, filters=None):\r\n params = {}\r\n if instance_ids:\r\n self.build_list_params(params, instance_ids, 'InstanceId')\r\n if filters:\r\n if 'group-id' in filters:\r\n warnings.warn(\"The group-id filter now requires a security \"\r\n \"group identifier (sg-*) instead of a group \"\r\n \"name. To filter by group name use the \"\r\n \"'group-name' filter instead.\", UserWarning)\r\n self.build_filter_params(params, filters)\r\n return self.get_list('DescribeInstances', params,\r\n [('item', Reservation)], verb='POST')", "def instances(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BrokerInstanceArgs']]]]:\n return pulumi.get(self, \"instances\")", "def instances(request): #pylint: disable=unused-argument\n response = list()\n # Retrieve filtered parameter on GET. It's used to display all instances or just user instances\n # In all cases, if user is not superuser, only user instances are displayed\n if 'filtered' in request.GET:\n filtered = ast.literal_eval(request.GET['filtered'])\n else:\n # By default, we filter based on user_id\n filtered = True\n # By default, we just list user instances, not all instances\n if not request.user.is_superuser:\n # If user is superuser and user are requesting admin view of instances\n # We ask a full list of instances\n filtered = True\n\n # We retrieve data from backend\n data_instances = BACKEND.instances(request, filtered)\n data_users = BACKEND.users()\n data_projects = BACKEND.projects()\n\n # We merge user and project information w/ instances\n for data_instance in data_instances:\n try:\n project = \"{name}\".format(\n **data_projects[data_instances[data_instance]['project_id']])\n except KeyError:\n project = data_instances[data_instance]['project_id']\n\n try:\n user = \"{name}\".format(\n **data_users[data_instances[data_instance]['user_id']])\n except KeyError:\n user = data_instances[data_instance]['user_id']\n response.append({\n 'id': data_instances[data_instance]['id'],\n 'name': data_instances[data_instance]['name'],\n 'created_at': data_instances[data_instance]['created_at'],\n 'lease_end': data_instances[data_instance]['lease_end'],\n 'project': project,\n 'user': user\n })\n return JsonResponse(response, safe=False)", "def list_test_instances():\n run('ls -1 %s' % env.site_root)", "def get_instance_templates(self):\n response = self.call_api('/global/instanceTemplates')\n return {\n template['name']: template for template in response.get('items', [])\n }", "def configure_cluster(ctx, zone, db_instance_name):\n ctx.run(init_pg_servers_play_run(zone, db_instance_name), pty=True, echo=True)", "def _get_instances(instance_tags, region):\n return ec2_conn[region].get_all_instances(filters={\"tag:Name\": instance_tags})", "def _instancelist(self):\n\n rv = []\n self.iname = {}\n for resv in self.conn.get_all_reservations():\n for inst in resv.instances:\n if inst.state != 'terminated':\n name = inst.tags.get('Name',None)\n rv.append([inst.id,inst.state])\n if name is not None:\n rv.append([name,inst.state])\n else:\n rv.append([inst.id+'-needsName',inst.state])\n self.iname[name] = inst.id\n self.iname[inst.id] = inst.id\n return rv", "def instances(self):\n if \"instances\" in self._prop_dict:\n return InstancesCollectionPage(self._prop_dict[\"instances\"])\n else:\n return None", "def test_list_ec2_instances(self):\n 
instances = [e for e in list_ec2_instances()]\n self.assertEqual([], instances)", "def _list_snapshots(self):\n return self.resource.describe_snapshots(\n Filters=[\n {\n 'Name': 'tag:CreatedBy',\n 'Values': [\n 'AutomatedBackup{}'.format(INTERVAL_TYPE.capitalize())\n ]\n }\n ]\n )", "def list_notebook_instances(NextToken=None, MaxResults=None, SortBy=None, SortOrder=None, NameContains=None, CreationTimeBefore=None, CreationTimeAfter=None, LastModifiedTimeBefore=None, LastModifiedTimeAfter=None, StatusEquals=None, NotebookInstanceLifecycleConfigNameContains=None, DefaultCodeRepositoryContains=None, AdditionalCodeRepositoryEquals=None):\n pass", "def instances(self) -> pulumi.Output[Sequence['outputs.BrokerInstance']]:\n return pulumi.get(self, \"instances\")", "def get_datasource_instances(connection, ids=None, database_type=None, application=None,\n error_msg=None):\n application = application.id if isinstance(application, Application) else application\n application_provided = application is not None\n\n if application_provided:\n url = f\"{connection.base_url}/api/projects/{application}/datasources\"\n response = connection.session.get(url=url)\n else:\n database_type = None if database_type is None else database_type.join(\",\")\n ids = None if ids is None else ids.join(\",\")\n url = f\"{connection.base_url}/api/datasources\"\n response = connection.session.get(url=url, params={\n 'id': ids,\n 'database.type': database_type\n })\n if not response.ok:\n res = response.json()\n if application_provided and res.get(\"message\") == \"HTTP 404 Not Found\":\n # aka application based endpoint not supported\n # try without filtering\n warning_msg = (\"get_datasource_instances() warning: filtering by Application \"\n \"is not yet supported on this version of the I-Server. 
\"\n \"Returning all values.\")\n exception_handler(warning_msg, Warning, 0)\n return get_datasource_instances(connection=connection, ids=ids,\n database_type=database_type, error_msg=error_msg)\n if error_msg is None:\n if application_provided \\\n and res.get('code') == \"ERR006\" \\\n and \"not a valid value for Project ID\" in res.get('message'):\n error_msg = f\"{application} is not a valid Application class instance or ID\"\n raise ValueError(error_msg)\n error_msg = \"Error getting Datasource Instances\"\n if application_provided:\n error_msg += f\" within `{application}` Application\"\n response_handler(response, error_msg)\n response = alter_instance_list_resp(response)\n return response", "def preferred_instance_groups(self):\n if not self.unified_job_template:\n return []\n return list(self.unified_job_template.instance_groups.all())", "def instance_view(self) -> 'outputs.DedicatedHostInstanceViewResponse':\n return pulumi.get(self, \"instance_view\")", "def get_instances(ebs_support: str) -> Dict[str, str]:\n results = {}\n paginator = EC2_CLIENT.get_paginator(\"describe_instance_types\")\n resp_itr = paginator.paginate(\n Filters=[{\"Name\": \"ebs-info.ebs-optimized-support\", \"Values\": [ebs_support]}],\n )\n\n _type = \"false\" if ebs_support == \"unsupported\" else \"true\"\n for instances in resp_itr:\n for inst in instances.get(\"InstanceTypes\"):\n results[inst[\"InstanceType\"]] = _type\n return results", "def list(self, filters: dict = None, state: str = None, exclude: str = None) -> list:\n date_format = '%Y-%m-%d %H:%M:%S'\n self.instances = self.ec2.instances.all()\n\n # TOREMOVE\n def __all_instances():\n # all instances without filtering\n self.instances = [\n {\n 'InstanceId': instance.id,\n 'State': instance.state['Name'],\n 'Type': instance.instance_type,\n 'VpcId': instance.vpc_id,\n 'KeyName': instance.key_name,\n 'Tags': instance.tags,\n 'StartedAt': instance.launch_time.strftime(date_format)\n }\n for instance in self.instances\n ]\n\n if state:\n try:\n self.instances = self.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': [state]}])\n except IOError as e:\n raise EC2Error('Error listing instances by state {0} {1}'.format(state, e))\n\n if filters:\n # convert string into dict\n filters = literal_eval(filters)\n try:\n if not self.instances:\n self.instances = self.ec2.instances.all()\n\n self.instances = self.instances.filter(Filters=[{'Name': filters['Name'], 'Values': filters['Values']}])\n except IOError as e:\n raise EC2Error('Error listing instances with filters {0} {1}'.format(filters, e))\n\n if exclude:\n instances = []\n for i in self.instances:\n if i.id not in exclude:\n instances.append(i)\n return [\n {\n 'InstanceId': instance.id,\n 'State': instance.state['Name'],\n 'Type': instance.instance_type,\n 'VpcId': instance.vpc_id,\n 'KeyName': instance.key_name,\n 'Tags': instance.tags,\n 'StartedAt': instance.launch_time.strftime(date_format)\n }\n for instance in instances\n ]\n else:\n return [\n {\n 'InstanceId': instance.id,\n 'State': instance.state['Name'],\n 'Type': instance.instance_type,\n 'VpcId': instance.vpc_id,\n 'KeyName': instance.key_name,\n 'Tags': instance.tags,\n 'StartedAt': instance.launch_time.strftime(date_format)\n }\n for instance in self.instances\n ]", "def get_instances(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_instances = []\n reservations = conn.get_all_reservations()\n for reservation in reservations:\n for instance in reservation.instances:\n 
region_instances.append(instance)\n except boto.exception.EC2ResponseError:\n return []\n return region_instances", "def get_instance_list(\n client,\n prefix: str\n):\n l = set()\n page = client.list_objects_v2(\n Bucket=bucket, Prefix=prefix, MaxKeys=page_size\n )\n l |= {r['Key'] for r in page['Contents'] if r['Key'][-4:] == 'json'}\n\n while page['IsTruncated']:\n page = client.list_objects_v2(\n Bucket=bucket,\n Prefix=prefix,\n MaxKeys=page_size,\n ContinuationToken=page['NextContinuationToken']\n )\n l |= {r['Key'] for r in page['Contents'] if r['Key'][-4:] == 'json'}\n return l", "def getRunningInstances(self, instanceType='Agents', runitStatus='Run'):\n res = self.sysAdminClient.getOverallStatus()\n if not res[\"OK\"]:\n self.logError(\"Failure to get %s from system administrator client\" % instanceType, res[\"Message\"])\n return res\n\n val = res['Value'][instanceType]\n runningAgents = defaultdict(dict)\n for system, agents in val.iteritems():\n for agentName, agentInfo in agents.iteritems():\n if agentInfo['Setup'] and agentInfo['Installed']:\n if runitStatus != 'All' and agentInfo['RunitStatus'] != runitStatus:\n continue\n confPath = cfgPath('/Systems/' + system + '/' + self.setup + '/%s/' % instanceType + agentName)\n for option, default in (('PollingTime', HOUR), ('Port', None)):\n optPath = os.path.join(confPath, option)\n runningAgents[agentName][option] = gConfig.getValue(optPath, default)\n runningAgents[agentName][\"LogFileLocation\"] = \\\n os.path.join(self.diracLocation, 'runit', system, agentName, 'log', 'current')\n runningAgents[agentName][\"PID\"] = agentInfo[\"PID\"]\n runningAgents[agentName]['Module'] = agentInfo['Module']\n runningAgents[agentName]['RunitStatus'] = agentInfo['RunitStatus']\n runningAgents[agentName]['System'] = system\n\n return S_OK(runningAgents)", "async def describe_dbinstances_overview_async(\n self,\n request: dds_20151201_models.DescribeDBInstancesOverviewRequest,\n ) -> dds_20151201_models.DescribeDBInstancesOverviewResponse:\n runtime = util_models.RuntimeOptions()\n return await self.describe_dbinstances_overview_with_options_async(request, runtime)", "def instance_types(self) -> Sequence[str]:\n return pulumi.get(self, \"instance_types\")", "def running_instances(self, args):\n message = MessageClass()\n region = args[\"Region\"]\n\n # Boto3 resource creation by providing the access_id and access_secret\n ec2 = boto3.resource(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n instances = ec2.instances.filter(\n Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])\n for instance in instances:\n attachment = MessageAttachmentsClass()\n attachment.title = instance.id\n message.attach(attachment)\n\n button = MessageButtonsClass()\n button.text = \"Stop Instance\"\n button.value = \"Stop Instance\"\n button.name = \"Stop Instance\"\n button.command = {\"service_application\": self.yellowant_integration_id,\n \"function_name\": \"stop-instance\",\n \"data\": {\"Instance-ID\": instance.id, \"Region\": region}}\n attachment.attach_button(button)\n\n message.message_text = \"Instances Running are:\"\n return message.to_json()", "def getInstancesD(region):\n instances = getInstances(region)\n instancesDicts = {\"id\": i.id,\n \"KEEP-tag\": getKeepTag(i),\n \"instance_type\": i.instance_type,\n \"state\": i.state,\n \"launch_time\": 
i.launch_time,\n \"security_groups\": getGroups(i),\n \"region\": i.region.name,\n \"PROD\": isProduction(i)\n }", "def share_replicas_get_all(context, with_share_data=False,\n with_share_server=True, session=None):\n session = session or get_session()\n\n result = _share_replica_get_with_filters(\n context, with_share_server=with_share_server, session=session).all()\n\n if with_share_data:\n result = _set_instances_share_data(context, result, session)\n\n return result", "def get_instances_in_instance_group(\n self, name, zone, max_results=None, page_token=None):\n params = {}\n if max_results:\n params['maxResults'] = max_results\n if page_token:\n params['pageToken'] = page_token\n return self.call_api(\n '/zones/%s/instanceGroups/%s/listInstances' % (zone, name),\n method='POST',\n params=params,\n )", "def xtest_instance_api_negative(self):\n\n # Test creating a db instance.\n LOG.info(\"* Creating db instance\")\n body = r\"\"\"\n {\"instance\": {\n \"name\": \"dbapi_test\",\n \"flavorRef\": \"medium\",\n \"port\": \"3306\",\n \"dbtype\": {\n \"name\": \"mysql\",\n \"version\": \"5.5\"\n }\n }\n }\"\"\"\n\n req = httplib2.Http(\".cache\")\n resp, content = req.request(API_URL + \"instances\", \"POST\", body, AUTH_HEADER)\n LOG.debug(content)\n content = json.loads(content)\n LOG.debug(resp)\n LOG.debug(content)\n\n self.instance_id = content['instance']['id']\n LOG.debug(\"Instance ID: %s\" % self.instance_id)\n\n # Assert 1) that the request was accepted and 2) that the response\n # is in the expected format.\n self.assertEqual(201, resp.status)\n self.assertTrue(content.has_key('instance'))\n\n\n # Test creating an instance without a body in the request.\n LOG.info(\"* Creating an instance without a body\")\n resp, content = req.request(API_URL + \"instances\", \"POST\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n\n # Test creating an instance with a malformed body.\n LOG.info(\"* Creating an instance with a malformed body\")\n resp, content = req.request(API_URL + \"instances\", \"POST\", r\"\"\"{\"instance\": {}}\"\"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(500, resp.status)\n \n # Test listing all db instances with a body in the request.\n LOG.info(\"* Listing all db instances with a body\")\n resp, content = req.request(API_URL + \"instances\", \"GET\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n \n # Test getting a specific db instance with a body in the request.\n LOG.info(\"* Getting instance %s with a body in the request\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id, \"GET\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n\n # Test getting a non-existent db instance.\n LOG.info(\"* Getting dummy instance\")\n resp, content = req.request(API_URL + \"instances/dummy\", \"GET\", \"\", AUTH_HEADER)\n content = json.loads(content)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n\n # Test immediately resetting the password on a db instance with a body in the request.\n LOG.info(\"* Resetting password on instance %s with a body in the request\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id + \"/resetpassword\", \"POST\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n \n\n # Test resetting the password on a db 
instance for a non-existent instance\n LOG.info(\"* Resetting password on dummy instance\")\n resp, content = req.request(API_URL + \"instances/dummy/resetpassword\", \"POST\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n \n # Test restarting a db instance for a non-existent instance\n LOG.info(\"* Restarting dummy instance\")\n resp, content = req.request(API_URL + \"instances/dummy/restart\", \"POST\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n\n # Test immediately restarting a db instance with a body in the request.\n LOG.info(\"* Restarting instance %s\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id + \"/restart\", \"POST\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n\n # Test deleting an instance with a body in the request.\n LOG.info(\"* Testing delete of instance %s with a body in the request\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id, \"DELETE\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n # Test that trying to delete an already deleted instance returns\n # the proper error code.\n LOG.info(\"* Testing re-delete of instance %s\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id, \"DELETE\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)", "def get_fully_solved_instances(self, db):\n numInstances = db.session.query(db.Instance).options(joinedload_all('properties')) \\\n .filter(db.Instance.experiments.contains(self)).distinct().count()\n if numInstances == 0: return 0\n num_jobs_per_instance = db.session.query(db.ExperimentResult) \\\n .filter_by(experiment=self).count() / numInstances\n instances = []\n for i in self.instances:\n if db.session.query(db.ExperimentResult) \\\n .filter(db.ExperimentResult.resultCode.like('1%')) \\\n .filter_by(experiment=self, instance=i, status=1) \\\n .count() == num_jobs_per_instance:\n instances.append(i)\n return instances", "def do_show(self, *args):\n args = [ele for ele in args[0].split(' ')]\n if args[0] == '':\n print(\"** class name missing **\")\n return\n if args[0] not in self.list_classes:\n print(\"** class doesn't exist **\")\n return\n if len(args) != 2:\n print(\"** instance id missing **\")\n return\n\n storage.reload()\n dict_objs = storage.all()\n if dict_objs is None or dict_objs == []:\n print(\"** no instance found **\")\n return\n\n key = \"{}.{}\".format(args[0], args[1])\n if key in dict_objs.keys():\n print(dict_objs[key])\n else:\n print(\"** no instance found **\")", "def Get_Running_Instances():\n ec2 = boto3.resource('ec2') \n #call the features resource from the boto3 library\n instances = ec2.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': ['pending', 'running',]},])\n #filter the instances returned using the state name\n #you can also filter using Tags by adding the filters: \n #[{'Name': 'tag-key', 'Values': ['Role','Name',]}, {'Name': 'tag-value', 'Values': ['*test*', '*TEST*',]},]\n return [instance.id for instance in instances]\n #return a liste with the ids of the instances", "def list_images(self):\n \n logging.debug(\"list_images entered for %s\" % self.machine_name) \n snapshots = cs.list_snapshots()\n res = []\n server_id = self.cloudserver.id\n # find the one for this server\n for 
snapshot in snapshots:\n img = snapshot.metadata.get(\"instance_uuid\", None)\n # print img\n\n if img == server_id:\n print \"Server %s has snapshot %s\" % (server_id, img)\n res.append(img)\n\n return res", "def instances(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"instances\")", "def instances(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"instances\")", "def _get_running_ec2_instances(theargs):\n mapstr = ''\n if theargs.profile is not None:\n boto3.setup_default_session(profile_name=theargs.profile)\n ec2 = boto3.client('ec2', region_name='us-west-2')\n\n response = ec2.describe_regions()\n for region in response['Regions']:\n rname = region['RegionName']\n sys.stdout.write('Running ec2 query in region: ' + rname + '\\n')\n ec2 = boto3.client('ec2', region_name=rname)\n mapstr += 'Region: ' + rname + '\\n'\n respy = ec2.describe_instances()\n for reso in respy['Reservations']:\n for entry in reso['Instances']:\n namey = ''\n try:\n for keyval in entry['Tags']:\n if keyval['Key'] == 'Name':\n namey = keyval['Value']\n break\n except KeyError:\n pass\n\n mapstr += ('\\t\\t' + entry['PublicDnsName'] + '\\n' +\n '\\t\\tLaunch Date: ' + str(entry['LaunchTime']) +\n '\\n' + \n '\\t\\tId: ' + entry['InstanceId'] + '\\n' +\n '\\t\\tType: ' + entry['InstanceType'] + '\\n' +\n '\\t\\tName: ' + namey + '\\n' +\n '\\t\\tState: ' + entry['State']['Name'] + '\\n\\n')\n sys.stdout.write('\\nResults:\\n\\n')\n return mapstr", "def get_instance_classes():\n return Base_Instance.get_instance_classes()", "def instances(self, endpoint_name=None):\n if endpoint_name is None:\n _, body = self.request('/v1.1/endpoint/instances', 'GET')\n else:\n _, body = self.request('/v1.1/endpoints/%s/instances' % endpoint_name, 'GET')\n return body", "def List(self, zone):\n project = properties.VALUES.core.project.Get(required=True)\n request = self.messages.ComputeInstancesListRequest(\n zone=zone, project=project)\n instances = list_pager.YieldFromList(\n service=self.client.instances,\n request=request,\n method='List',\n field='items')\n\n result_set = []\n for instance in instances:\n if self._VMCreatedByExecGroup(instance):\n result_set.append(instance)\n\n return result_set", "def test_instance_api(self):\n\n # Test creating a db instance.\n # ----------------------------\n LOG.info(\"* Creating db instance\")\n body = r\"\"\"\n {\"instance\": {\n \"name\": \"%s\",\n \"flavorRef\": \"103\",\n \"port\": \"3306\",\n \"dbtype\": {\n \"name\": \"mysql\",\n \"version\": \"5.5\"\n }\n }\n }\"\"\" % INSTANCE_NAME\n\n client = httplib2.Http(\".cache\", timeout=TIMEOUTS['http'], disable_ssl_certificate_validation=True)\n resp, content = self._execute_request(client, \"instances\", \"POST\", body)\n\n # Assert 1) that the request was accepted and 2) that the response\n # is in the expected format.\n self.assertEqual(201, resp.status, (\"Expecting 201 as response status of create instance but received %s\" % resp.status))\n content = self._load_json(content,'Create Instance')\n self.assertTrue(content.has_key('instance'), \"Response body of create instance does not have 'instance' field\")\n\n credential = content['instance']['credential']\n\n self.instance_id = content['instance']['id']\n LOG.debug(\"Instance ID: %s\" % self.instance_id)\n\n\n # Test listing all db instances.\n # ------------------------------\n LOG.info(\"* Listing all db instances\")\n resp, content = self._execute_request(client, \"instances\", \"GET\", \"\")\n \n 
# Assert 1) that the request was accepted and 2) that the response is\n # in the expected format (e.g. a JSON object beginning with an\n # 'instances' key).\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of list instance but received %s\" % resp.status))\n content = self._load_json(content,'List all Instances')\n self.assertTrue(content.has_key('instances'), \"Response body of list instances does not contain 'instances' field.\")\n\n\n # Test getting a specific db instance.\n # ------------------------------------\n LOG.info(\"* Getting instance %s\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances/\" + self.instance_id, \"GET\", \"\")\n \n # Assert 1) that the request was accepted and 2) that the returned\n # instance is the same as the accepted instance.\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of show instance but received %s\" % resp.status))\n content = self._load_json(content,'Get Single Instance')\n self.assertEqual(self.instance_id, str(content['instance']['id']), \"Instance ID not found in Show Instance response\")\n\n\n # Check to see if the instance we previously created is \n # in the 'running' state\n # -----------------------------------------------------\n wait_so_far = 0\n status = content['instance']['status']\n pub_ip = content['instance']['hostname']\n while status != 'running' or pub_ip is None or len(pub_ip) <= 0:\n # wait a max of max_wait for instance status to show running\n time.sleep(POLL_INTERVALS['boot'])\n wait_so_far += POLL_INTERVALS['boot']\n if wait_so_far >= TIMEOUTS['boot']:\n break\n \n resp, content = self._execute_request(client, \"instances/\" + self.instance_id, \"GET\", \"\")\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of show instance but received %s\" % resp.status))\n content = self._load_json(content,'Get Single Instance')\n status = content['instance']['status']\n pub_ip = content['instance']['hostname']\n\n if status != 'running':\n\n self.fail(\"for some reason the instance did not switch to 'running' in %s\" % TIMEOUT_STR)\n else:\n # try to connect to mysql instance\n pub_ip = content['instance']['hostname']\n # user/pass = credentials\n db_user = credential['username']\n db_passwd = credential['password']\n db_name = 'mysql'\n\n LOG.info(\"* Trying to connect to mysql DB on first boot: %s, %s, %s\" %(db_user, db_passwd, pub_ip))\n conn = self.db_connect(db_user, db_passwd, pub_ip, db_name)\n if conn is None:\n self.fail(\"* maximum trials reached, db connection failed on first boot over %s: \" % pub_ip)\n conn.close()\n\n\n\n # Test resetting the password on a db instance.\n # ---------------------------------------------\n LOG.info(\"* Resetting password on instance %s\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances/\" + self.instance_id +\"/resetpassword\", \"POST\", \"\")\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of reset password but received %s\" % resp.status))\n content = self._load_json(content,'Get new password')\n\n if resp.status == 200 :\n db_new_passwd = content['password']\n LOG.info(\"* Trying to connect to mysql DB after resetting password: %s, %s, %s\" %(db_user, db_new_passwd, pub_ip))\n conn = self.db_connect(db_user, db_new_passwd, pub_ip, db_name)\n if conn is None:\n LOG.exception(\"* something is wrong with mysql connection after resetting password\")\n conn.close()\n LOG.info(\"* Maybe the old password still works ?\")\n conn_2 = 
self.db_connect(db_user, db_passwd, pub_ip, db_name)\n if conn_2 is None:\n LOG.exception(\"* no, old password does not work anymore\")\n else:\n LOG.info(\"* old password still works, new password has not kicked in\")\n conn_2.close()\n self.fail(\"* maximum trials reached, db connection failed after resetting password over %s: \" % pub_ip)\n\n\n # XXX: Suspect restarting too soon after a \"reset password\" command is putting the instance in a bad mood on restart\n time.sleep(DELAYS['between_reset_and_restart'])\n\n # Test restarting a db instance.\n # ------------------------------\n LOG.info(\"* Restarting instance %s\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances/\" + self.instance_id +\"/restart\", \"POST\", \"\")\n self.assertEqual(204, resp.status, (\"Expecting 204 as response status of restart instance but received %s\" % resp.status))\n\n # Test getting a specific db instance.\n LOG.info(\"* Getting instance %s\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances/\" + self.instance_id , \"GET\", \"\")\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of show instance but received %s\" % resp.status))\n content = self._load_json(content,'Get Single Instance after Restart')\n \n wait_so_far = 0\n status = content['instance']['status']\n while status != 'running':\n # wait a max of max_wait for instance status to show running\n time.sleep(POLL_INTERVALS['boot'])\n wait_so_far += POLL_INTERVALS['boot']\n if wait_so_far >= TIMEOUTS['boot']:\n break\n \n resp, content = self._execute_request(client, \"instances/\" + self.instance_id , \"GET\", \"\")\n self.assertEqual(200, resp.status, (\"Expecting 200 as response status of show instance but received %s\" % resp.status))\n content = self._load_json(content,'Get Single Instance')\n status = content['instance']['status']\n\n if status != 'running':\n self.fail(\"Instance %s did not go to running after a reboot and waiting %s\" % (self.instance_id, TIMEOUT_STR))\n else:\n # try to connect to mysql instance\n time.sleep(DELAYS['between_reboot_and_connect'])\n LOG.info(\"* Trying to connect to mysql DB after rebooting the instance: %s, %s, %s\" %(db_user, db_new_passwd, pub_ip))\n\n conn = self.db_connect(db_user, db_new_passwd, pub_ip, db_name)\n if conn is None:\n self.fail(\"* maximum trials reached, db connection failed after rebooting instance over %s: \" % pub_ip)\n conn.close()\n\n # Test deleting a db instance.\n # ----------------------------\n LOG.info(\"* Deleting instance %s\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances/\" + self.instance_id , \"DELETE\", \"\")\n\n # Assert 1) that the request was accepted and 2) that the instance has\n # been deleted.\n self.assertEqual(204, resp.status, \"Response status of instance delete did not return 204\")\n\n LOG.debug(\"Verifying that instance %s has been deleted\" % self.instance_id)\n resp, content = self._execute_request(client, \"instances\", \"GET\", \"\")\n \n if not content:\n pass\n else:\n content = json.loads(content)\n for each in content['instances']:\n self.assertFalse(each['id'] == self.instance_id, (\"Instance %s did not actually get deleted\" % self.instance_id))\n\n LOG.debug(\"Sleeping...\")\n time.sleep(DELAYS['after_delete'])", "def get_solved_instances(self, db):\n instance_ids = [i[0] for i in db.session.query(db.ExperimentResult.Instances_idInstance) \\\n .filter_by(experiment=self).filter(db.ExperimentResult.resultCode.like('1%')) \\\n 
.filter_by(status=1).distinct().all()]\n return db.session.query(db.Instance).filter(db.Instance.idInstance.in_(instance_ids)).all()", "async def describe_dbinstances_with_options_async(\n self,\n request: dds_20151201_models.DescribeDBInstancesRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstancesResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.charge_type):\n query['ChargeType'] = request.charge_type\n if not UtilClient.is_unset(request.connection_domain):\n query['ConnectionDomain'] = request.connection_domain\n if not UtilClient.is_unset(request.dbinstance_class):\n query['DBInstanceClass'] = request.dbinstance_class\n if not UtilClient.is_unset(request.dbinstance_description):\n query['DBInstanceDescription'] = request.dbinstance_description\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.dbinstance_status):\n query['DBInstanceStatus'] = request.dbinstance_status\n if not UtilClient.is_unset(request.dbinstance_type):\n query['DBInstanceType'] = request.dbinstance_type\n if not UtilClient.is_unset(request.dbnode_type):\n query['DBNodeType'] = request.dbnode_type\n if not UtilClient.is_unset(request.engine):\n query['Engine'] = request.engine\n if not UtilClient.is_unset(request.engine_version):\n query['EngineVersion'] = request.engine_version\n if not UtilClient.is_unset(request.expire_time):\n query['ExpireTime'] = request.expire_time\n if not UtilClient.is_unset(request.expired):\n query['Expired'] = request.expired\n if not UtilClient.is_unset(request.network_type):\n query['NetworkType'] = request.network_type\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.page_number):\n query['PageNumber'] = request.page_number\n if not UtilClient.is_unset(request.page_size):\n query['PageSize'] = request.page_size\n if not UtilClient.is_unset(request.region_id):\n query['RegionId'] = request.region_id\n if not UtilClient.is_unset(request.replication_factor):\n query['ReplicationFactor'] = request.replication_factor\n if not UtilClient.is_unset(request.resource_group_id):\n query['ResourceGroupId'] = request.resource_group_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.tag):\n query['Tag'] = request.tag\n if not UtilClient.is_unset(request.v_switch_id):\n query['VSwitchId'] = request.v_switch_id\n if not UtilClient.is_unset(request.vpc_id):\n query['VpcId'] = request.vpc_id\n if not UtilClient.is_unset(request.zone_id):\n query['ZoneId'] = request.zone_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstances',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstancesResponse(),\n await self.call_api_async(params, req, runtime)\n )" ]
[ "0.6715705", "0.6623986", "0.6588162", "0.6423212", "0.638032", "0.63168824", "0.62313014", "0.6175161", "0.6156203", "0.6095728", "0.60955036", "0.6073728", "0.6036075", "0.6021459", "0.58897156", "0.5872876", "0.58168375", "0.5760326", "0.5748851", "0.5741341", "0.5739596", "0.5727637", "0.57166696", "0.57035667", "0.57016337", "0.568988", "0.5679877", "0.5679708", "0.56748325", "0.566804", "0.56609267", "0.5660364", "0.56522566", "0.56468725", "0.562183", "0.56138724", "0.5584789", "0.55837613", "0.55769926", "0.55561745", "0.5549342", "0.55439514", "0.5488639", "0.5476838", "0.546526", "0.54641074", "0.542553", "0.542129", "0.5408256", "0.54077035", "0.5405454", "0.5401298", "0.53925514", "0.5382511", "0.53806984", "0.537756", "0.5374693", "0.5357871", "0.53308296", "0.53278285", "0.5300895", "0.528804", "0.5269781", "0.52356505", "0.5233279", "0.52330637", "0.52297646", "0.52255434", "0.5212965", "0.5209206", "0.52061003", "0.5197776", "0.5193252", "0.5185105", "0.51660484", "0.51558906", "0.5145746", "0.51445484", "0.513728", "0.51248634", "0.51186514", "0.5116264", "0.5114358", "0.5078964", "0.50705683", "0.5064329", "0.5056921", "0.5056357", "0.5053488", "0.5049877", "0.5047865", "0.5038064", "0.5038064", "0.50342846", "0.50339806", "0.5030533", "0.50285125", "0.5019121", "0.5018504", "0.50171655" ]
0.5194656
72
If you do not specify an instance when you call this operation, the overview information of all instances in the specified region within this account is returned. Paged queries are not supported for this operation.
def describe_dbinstances_overview_with_options(
    self,
    request: dds_20151201_models.DescribeDBInstancesOverviewRequest,
    runtime: util_models.RuntimeOptions,
) -> dds_20151201_models.DescribeDBInstancesOverviewResponse:
    UtilClient.validate_model(request)
    query = {}
    if not UtilClient.is_unset(request.charge_type):
        query['ChargeType'] = request.charge_type
    if not UtilClient.is_unset(request.engine_version):
        query['EngineVersion'] = request.engine_version
    if not UtilClient.is_unset(request.instance_class):
        query['InstanceClass'] = request.instance_class
    if not UtilClient.is_unset(request.instance_ids):
        query['InstanceIds'] = request.instance_ids
    if not UtilClient.is_unset(request.instance_status):
        query['InstanceStatus'] = request.instance_status
    if not UtilClient.is_unset(request.instance_type):
        query['InstanceType'] = request.instance_type
    if not UtilClient.is_unset(request.network_type):
        query['NetworkType'] = request.network_type
    if not UtilClient.is_unset(request.owner_account):
        query['OwnerAccount'] = request.owner_account
    if not UtilClient.is_unset(request.owner_id):
        query['OwnerId'] = request.owner_id
    if not UtilClient.is_unset(request.region_id):
        query['RegionId'] = request.region_id
    if not UtilClient.is_unset(request.resource_group_id):
        query['ResourceGroupId'] = request.resource_group_id
    if not UtilClient.is_unset(request.resource_owner_account):
        query['ResourceOwnerAccount'] = request.resource_owner_account
    if not UtilClient.is_unset(request.resource_owner_id):
        query['ResourceOwnerId'] = request.resource_owner_id
    if not UtilClient.is_unset(request.security_token):
        query['SecurityToken'] = request.security_token
    if not UtilClient.is_unset(request.v_switch_id):
        query['VSwitchId'] = request.v_switch_id
    if not UtilClient.is_unset(request.vpc_id):
        query['VpcId'] = request.vpc_id
    if not UtilClient.is_unset(request.zone_id):
        query['ZoneId'] = request.zone_id
    req = open_api_models.OpenApiRequest(
        query=OpenApiUtilClient.query(query)
    )
    params = open_api_models.Params(
        action='DescribeDBInstancesOverview',
        version='2015-12-01',
        protocol='HTTPS',
        pathname='/',
        method='POST',
        auth_type='AK',
        style='RPC',
        req_body_type='formData',
        body_type='json'
    )
    return TeaCore.from_map(
        dds_20151201_models.DescribeDBInstancesOverviewResponse(),
        self.call_api(params, req, runtime)
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_instances(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_instances = []\n reservations = conn.get_all_reservations()\n for reservation in reservations:\n for instance in reservation.instances:\n region_instances.append(instance)\n except boto.exception.EC2ResponseError:\n return []\n return region_instances", "def list_instances():\n print(str_sep)\n print(\"Listing EC2 instances from [{}] region.\".format(SESSION.region_name))\n print(\"{:20s}{:15s}{:10s}{}\".format(\"ID\", \"TYPE\", \"STATE\", \"NAME\"))\n print(str_sep)\n\n try:\n for instance in EC2_MANAGER.list_instances():\n # get the instance name in the tags list\n name = next((item for item in instance.tags if item[\"Key\"] == \"Name\"),\n {'Key': 'Name', 'Value': 'None'})\n\n print(\"{:20s}{:15s}{:10s}{}\".format(instance.id,\n instance.instance_type,\n instance.state['Name'],\n name['Value']))\n except ClientError as e:\n ErrManager.err_manager(e)\n\n print(str_sep)", "def list_instances(self):\n print '# AWS EC2 instances'\n self.compute.list_instances()", "def search(self, q=\"\", version=\"\", instance=\"\", page=1):\n\n access_token = self._get_access_token()\n payload = {\n 'q': q,\n 'version': version,\n 'instance': instance,\n 'page[number]': page,\n 'page[size]': 20,\n }\n headers = {'Authorization': \"Bearer \"+access_token}\n response = requests.get(self.api_endpoint, params=payload, headers=headers)\n\n if response.status_code == 401:\n raise AuthError('not authorized')\n elif response.status_code == 404:\n raise NotFoundError('entry not found')\n elif response.status_code != 200:\n raise Error('HTTP error')\n\n try:\n obj = response.json()\n except (ValueError, KeyError):\n raise Error(\"couldn't decode json\")\n\n return obj", "def listInstancesRegionZone(region,zone):\n\tprint \"-\"*80\n\tprint \"# Region :\",region,\" Zone\", zone\t\n\tprint \"-\"*80\n\tinstances = getInstancesRegionZone(region,zone)\n\tif instances:\n\t\tfor instance in instances:\n\t\t\tprint \"[\",instance.ami_launch_index,\"]\",instance.ip_address,\" (\",instance.private_ip_address,\") \",instance.instance_type,\" key=\",instance.key_name", "def describe_instances():\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Describe instances\n instances = ec2_resource.instances.all()\n for instance in instances:\n print('State of the instance \"' + instance.id + '\" is: \"' + instance.state['Name'] + '\"')\n return", "def _get_instances(instance_tags, region):\n return ec2_conn[region].get_all_instances(filters={\"tag:Name\": instance_tags})", "def list_instances(self):\n\n response = self.client.service.instances().aggregatedList(\n project=self.client.project_id).execute()\n\n zones = response.get('items', {})\n instances = []\n for zone in zones.values():\n for instance in zone.get('instances', []):\n instances.append(instance)\n\n return instances", "def list_ec2(region, filter_by_kwargs):\n conn = boto.ec2.connect_to_region(region)\n instances = conn.get_only_instances()\n return lookup(instances, filter_by=filter_by_kwargs)", "def list_instances():\n if request.method == \"GET\":\n return render_template(\"instances.html\")", "def to_representation(self, instance):\n\n return instance.region", "def list_instances(self):\n instances = []\n try:\n pages = self.compute.virtual_machines.list(\n CONF.azure.resource_group)\n except Exception as e:\n msg = six.text_type(e)\n LOG.exception(msg)\n ex = 
exception.InstanceListFailure(reason=six.text_type(e))\n raise ex\n else:\n if pages:\n for i in pages:\n instances.append(i.name)\n return instances", "def get_all_vpc_instances ( ec2_conn, vpc ) :\n return ec2_conn.get_only_instances( filters = { \"vpc-id\" : vpc.id } )", "def show_instances():\n return get_instances()", "def list_aws_instances(verbose=False, state='all'):\n conn = get_ec2_connection()\n\n reservations = conn.get_all_reservations()\n instances = []\n for res in reservations:\n for instance in res.instances:\n if state == 'all' or instance.state == state:\n instance = {\n 'id': instance.id,\n 'type': instance.instance_type,\n 'image': instance.image_id,\n 'state': instance.state,\n 'instance': instance,\n }\n instances.append(instance)\n env.instances = instances\n if verbose:\n import pprint\n pprint.pprint(env.instances)", "def list_running_instances(self):\n print '# Running AWS EC2 instances'\n self.compute.list_running_instances()", "def yield_instances(self, instance_filter=None):\n if instance_filter and set(\"\\\"\\\\'\").intersection(instance_filter):\n raise ValueError('Invalid instance filter: %s' % instance_filter)\n page_token = None\n while True:\n params = {'maxResults': 250}\n if instance_filter:\n params['filter'] = 'name eq \"%s\"' % instance_filter\n if page_token:\n params['pageToken'] = page_token\n resp = self.call_api('/aggregated/instances', params=params, deadline=120)\n items = resp.get('items', {})\n for zone in sorted(items):\n for instance in items[zone].get('instances', []):\n yield instance\n page_token = resp.get('nextPageToken')\n if not page_token:\n break", "def instances(self, endpoint_name=None):\n if endpoint_name is None:\n _, body = self.request('/v1.1/endpoint/instances', 'GET')\n else:\n _, body = self.request('/v1.1/endpoints/%s/instances' % endpoint_name, 'GET')\n return body", "def get_instances_in_instance_group(\n self, name, zone, max_results=None, page_token=None):\n params = {}\n if max_results:\n params['maxResults'] = max_results\n if page_token:\n params['pageToken'] = page_token\n return self.call_api(\n '/zones/%s/instanceGroups/%s/listInstances' % (zone, name),\n method='POST',\n params=params,\n )", "def instances(self, **query):\n return self._list(_instance.Instance, **query)", "def list_instances(self):\n # list instances\n self._list_instances()", "def instances(self):\n if \"instances\" in self._prop_dict:\n return InstancesCollectionPage(self._prop_dict[\"instances\"])\n else:\n return None", "def yield_instances_in_zone(self, zone, instance_filter=None):\n if instance_filter and set(\"\\\"\\\\'\").intersection(instance_filter):\n raise ValueError('Invalid instance filter: %s' % instance_filter)\n page_token = None\n while True:\n params = {'maxResults': 250}\n if instance_filter:\n params['filter'] = 'name eq \"%s\"' % instance_filter\n if page_token:\n params['pageToken'] = page_token\n try:\n resp = self.call_api(\n '/zones/%s/instances' % zone, params=params, deadline=120)\n except net.Error as exc:\n if not page_token and exc.status_code == 400:\n return # no such zone, this is fine...\n raise\n for instance in resp.get('items', []):\n yield instance\n page_token = resp.get('nextPageToken')\n if not page_token:\n break", "def list(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n print(vmrun.listSnapshots())", "def list_(args):\n\n # Get Config.py\n cloud = 
get_current_cloud(args.cloud)\n\n instances = cloud.list_instances()\n print_table(print_instance_summary, headers, instances,\n use_color=args.color)\n return instances", "def describe_instance(instance_id):\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n try:\n # Describe an instance\n instance = ec2_resource.Instance(instance_id)\n print('\\nInstance Id: ' + instance_id)\n print('Instance Id: ' + instance.id)\n print('Image Id: ' + instance.image_id)\n print('Instance Type: ' + instance.instance_type)\n print('State: ' + instance.state['Name'])\n if instance.state['Name'] == 'running':\n print('Private DNS Name: ' + instance.private_dns_name)\n print('Private IP: ' + instance.private_ip_address)\n print('Public DNS Name: ' + instance.public_dns_name)\n print('Public IP: ' + instance.public_ip_address)\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"MissingParameter\":\n print(\"Error: Missing instance id!!\")\n else:\n raise\n return", "def list_instances_detail(self):\n\n # TODO(imsplitbit): need to ask around if this is the best way to do\n # this. This causes some redundant vzlist commands as get_info is run\n # on every item returned from this command but it didn't make sense\n # to re-implement get_info as get_info_all.\n infos = []\n try:\n # get a list of CT names which will be nova friendly.\n # NOTE: This can be an issue if nova decides to change\n # the format of names. We would need to have a migration process\n # to change the names in the name field of the CTs.\n out, err = utils.execute('sudo', 'vzlist', '--all', '-o',\n 'name', '-H')\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Problem listing Vzs')\n\n for name in out.splitlines():\n name = name.split()[0]\n status = self.get_info(name)\n infos.append(driver.InstanceInfo(name, status['state']))\n\n return infos", "def aws_get_instances_by_name(region, name, raw=True):\n return _aws_get_instance_by_tag(region, name, 'tag:Name', raw)", "def load_instances_page(self):\n logging.info(\"loading instances page {}\".format(self.horizon_instances_url))\n\n return self._load_page_measure_time(self.driver, self.horizon_instances_url,\n tag = \"Instances Page\")", "def show_instances(cls, args, config):\n instance_list = config.get_all_instances()\n if len(instance_list) > 0:\n table_data = []\n for i in instance_list:\n provider_obj = config.get_object_by_id(i.provider_id, 'Provider')\n if provider_obj is None:\n continue\n provider_name = provider_obj.name\n #print \"provider_obj.type\",provider_obj.type\n if i.worker_group_id is not None:\n name = config.get_object_by_id(i.worker_id, 'WorkerGroup').name\n itype = 'worker'\n else:\n name = config.get_object_by_id(i.controller_id, 'Controller').name\n itype = 'controller'\n table_data.append([i.id, provider_name, i.provider_instance_identifier, itype, name])\n table_print(['ID', 'provider', 'instance id', 'type', 'name'], table_data)\n else:\n print \"No instance found\"", "def describe_rds_instances(rds, account, region, output_bucket):\n rds_list = rds.describe_db_instances().get('DBInstances')\n\n for rds_obj in rds_list:\n #print rds_obj\n output_bucket.append(misc.format_line((\n misc.check_if(account.get('name')),\n misc.check_if(region.get('RegionName')),\n misc.check_if(rds_obj.get('DBSubnetGroup').get('VpcId')),\n misc.check_if(rds_obj.get('DBInstanceIdentifier')),\n misc.check_if(rds_obj.get('DBInstanceClass')),\n 
misc.check_if(str(rds_obj.get('PubliclyAccessible'))),\n misc.check_if(rds_obj.get('Endpoint').get('Address')),\n misc.lookup(rds_obj.get('Endpoint').get('Address')),\n misc.check_if(str(rds_obj.get('Endpoint').get('Port')))\n )))", "def list_instances():\n js = _get_jetstream_conn()\n il = js.compute.instances.list()\n if not il:\n msg = \"You don't have any instances available.\"\n else:\n msg = (\"You have {0} instances available. Here are up to 3 most \"\n \"recent: \".format(len(il)))\n msg_ex = \"\"\n content = \"\"\n for i in il[:3]:\n msg_ex += \"{0},\".format(i.name)\n content += \"{0} ({1})\\n\".format(\n i.name, i.public_ips[0] if i.public_ips else i.private_ips[0])\n return statement(msg + msg_ex).simple_card(title=msg, content=content)", "def name(self):\n return self._client.project_name + '/instances/' + self.instance_id", "def GetInstance(self, instance, reason=None):\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_GET,\n (\"/%s/instances/%s\" %\n (GANETI_RAPI_VERSION, instance)), query, None)", "def Get(self, instance_name, zone):\n project = properties.VALUES.core.project.Get(required=True)\n request = self.messages.ComputeInstancesGetRequest(\n zone=zone, project=project, instance=instance_name)\n instance = self.client.instances.Get(request)\n if self._VMCreatedByExecGroup(instance):\n return instance\n raise HttpNotFoundError(\n 'Instance:{} not found'.format(instance_name), None, None)", "def get_all_reservations(config):\n reservations = []\n region_list = regions(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for region in region_list:\n _logger.info(\"Searching %s\", region)\n cnx = region.connect(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for reservation in cnx.get_all_instances():\n _logger.info(\"Found %s %s\", reservation,\n [str(i.id) for i in reservation.instances])\n reservations.append(reservation)\n return reservations", "def aws_get_instances_by_id(region, instance_id, raw=True):\n client = boto3.session.Session().client('ec2', region)\n try:\n matching_reservations = client.describe_instances(InstanceIds=[instance_id]).get('Reservations', [])\n except ClientError as exc:\n if exc.response.get('Error', {}).get('Code') != 'InvalidInstanceID.NotFound':\n raise\n return []\n instances = []\n [[instances.append(_aws_instance_from_dict(region, instance, raw)) # pylint: disable=expression-not-assigned\n for instance in reservation.get('Instances')] for reservation in matching_reservations if reservation]\n return instances", "def query_region(self, region_name=None, region_id=None):\n\n if region_name is not None and region_id is None:\n query = \"select * from region where region_name='{}'\".format(\n region_name)\n try:\n self.dbCursor.execute(query)\n return self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)\n elif region_name is None and region_id is not None:\n query = \"select * from region where region_id='{}'\".format(\n region_id)\n try:\n self.dbCursor.execute(query)\n return self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)", "def running_instances(self, args):\n message = MessageClass()\n region = args[\"Region\"]\n\n # Boto3 resource creation by providing the access_id and access_secret\n ec2 = boto3.resource(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n 
aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n instances = ec2.instances.filter(\n Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])\n for instance in instances:\n attachment = MessageAttachmentsClass()\n attachment.title = instance.id\n message.attach(attachment)\n\n button = MessageButtonsClass()\n button.text = \"Stop Instance\"\n button.value = \"Stop Instance\"\n button.name = \"Stop Instance\"\n button.command = {\"service_application\": self.yellowant_integration_id,\n \"function_name\": \"stop-instance\",\n \"data\": {\"Instance-ID\": instance.id, \"Region\": region}}\n attachment.attach_button(button)\n\n message.message_text = \"Instances Running are:\"\n return message.to_json()", "def api_get_regions():\n db_session = DBSession()\n\n rows = []\n criteria = '%'\n if request.args and request.args.get('q'):\n criteria += request.args.get('q') + '%'\n else:\n criteria += '%'\n\n regions = db_session.query(Region).filter(Region.name.like(criteria)).order_by(Region.name.asc()).all()\n if len(regions) > 0:\n if request.args.get('show_all'):\n rows.append({'id': 0, 'text': 'ALL'})\n for region in regions:\n rows.append({'id': region.id, 'text': region.name})\n\n return jsonify(**{'data': rows})", "def show_instance(name, session=None, call=None):\n if call == \"function\":\n raise SaltCloudException(\n \"The show_instnce function must be called with -a or --action.\"\n )\n log.debug(\"show_instance-> name: %s session: %s\", name, session)\n if session is None:\n session = _get_session()\n vm = _get_vm(name, session=session)\n record = session.xenapi.VM.get_record(vm)\n if not record[\"is_a_template\"] and not record[\"is_control_domain\"]:\n try:\n base_template_name = record[\"other_config\"][\"base_template_name\"]\n except Exception: # pylint: disable=broad-except\n base_template_name = None\n log.debug(\n \"VM %s, does not have base_template_name attribute\",\n record[\"name_label\"],\n )\n ret = {\n \"id\": record[\"uuid\"],\n \"image\": base_template_name,\n \"name\": record[\"name_label\"],\n \"size\": record[\"memory_dynamic_max\"],\n \"state\": record[\"power_state\"],\n \"private_ips\": get_vm_ip(name, session),\n \"public_ips\": None,\n }\n\n __utils__[\"cloud.cache_node\"](ret, _get_active_provider_name(), __opts__)\n return ret", "def nfvi_get_instances(paging, callback, context=None):\n cmd_id = _compute_plugin.invoke_plugin('get_instances', paging, context,\n callback=callback)\n return cmd_id", "def main(self, _):\n all_addresses = find_addresses.probe_regions()\n\n print(\"\")\n if not all_addresses:\n print(\"No namespace elastic IP addresses found.\")\n\n for region in consts.REGIONS:\n region_addresses = [address for address in all_addresses\n if address['region'] == region]\n if not region_addresses:\n continue\n\n print(f\"{region}: {len(region_addresses)} address(es) found:\")\n for address in region_addresses:\n if 'instance_name' in address:\n print(f\" {address['ip']} ({address['instance_name']})\")\n elif 'association_id' in address:\n print(f\" {address['ip']} (unknown association)\")\n else:\n print(f\" {address['ip']} (not associated)\")", "def list_addresses(self, region):\n assert is_valid_region(region), region\n page_token = None\n while True:\n params = {'maxResults': 250}\n if page_token:\n params['pageToken'] = page_token\n resp = self.call_api(\n '/regions/%s/addresses' % region, params=params, deadline=120)\n for addr in resp.get('items', []):\n yield addr\n page_token = 
resp.get('nextPageToken')\n if not page_token:\n break", "def terminate_instance(self):\n # connect to ec2\n try:\n ec2_region = [r for r in boto.ec2.regions() if r.name == self._region][0]\n except indexerror:\n print >> sys.stderr, 'unknown region: %s' % self._region\n exit(2)\n ec2_connection = ec2_region.connect()\n\n #import code; code.interact(local=locals())\n instances = reduce(list.__add__, [reservation.instances for reservation in ec2_connection.get_all_instances()])\n name_matches = [i for i in instances\n if i.tags.get('Name', None) == self._instance_name and i.state == 'running']\n\n if (not name_matches):\n raise ValueError('No instance found with name %s' % self._instance_name)\n elif len(name_matches) > 1:\n raise ValueError('Multiple instances found with name %s' % self._instance_name)\n\n instance = name_matches[0]\n\n ec2_connection.terminate_instances(instance_ids=[instance.id])", "def show_instance(name, call=None):\n if call != \"action\":\n raise SaltCloudSystemExit(\n \"The show_instance action must be called with -a or --action.\"\n )\n\n nodes = list_nodes_full()\n __utils__[\"cloud.cache_node\"](nodes[name], _get_active_provider_name(), __opts__)\n return nodes[name]", "def get_snapshots(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_snapshots = conn.get_all_snapshots(owner='self')\n except boto.exception.EC2ResponseError:\n return []\n return region_snapshots", "def find_instances(\n instance_id=None,\n name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n return_objs=False,\n in_states=None,\n filters=None,\n):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n try:\n filter_parameters = {\"filters\": {}}\n\n if instance_id:\n filter_parameters[\"instance_ids\"] = [instance_id]\n\n if name:\n filter_parameters[\"filters\"][\"tag:Name\"] = name\n\n if tags:\n for tag_name, tag_value in tags.items():\n filter_parameters[\"filters\"][\"tag:{}\".format(tag_name)] = tag_value\n\n if filters:\n filter_parameters[\"filters\"].update(filters)\n\n reservations = conn.get_all_reservations(**filter_parameters)\n instances = [i for r in reservations for i in r.instances]\n log.debug(\n \"The filters criteria %s matched the following instances:%s\",\n filter_parameters,\n instances,\n )\n\n if in_states:\n instances = [i for i in instances if i.state in in_states]\n log.debug(\n \"Limiting instance matches to those in the requested states: %s\",\n instances,\n )\n if instances:\n if return_objs:\n return instances\n return [instance.id for instance in instances]\n else:\n return []\n except boto.exception.BotoServerError as exc:\n log.error(exc)\n return []", "def detail_running_instance(self):\n\n instance_id = self._choose_among_running_instances()\n\n # Exit option\n if not instance_id:\n print 'Operation cancelled'\n return\n\n # Print the details\n print '# Details of the \"%s\" instance' % instance_id\n self.compute.detail_running_instance(instance_id)", "def instance_view(self) -> 'outputs.DedicatedHostInstanceViewResponse':\n return pulumi.get(self, \"instance_view\")", "def _aws_get_instance_by_tag(region, name, tag, raw):\n client = boto3.session.Session().client('ec2', region)\n matching_reservations = client.describe_instances(Filters=[{'Name': tag, 'Values': [name]}]).get('Reservations', [])\n instances = []\n [[instances.append(_aws_instance_from_dict(region, instance, raw)) # pylint: disable=expression-not-assigned\n for instance in reservation.get('Instances')] 
for reservation in matching_reservations if reservation]\n return instances", "def get_instances() -> dict:\n url = f\"{app.config['COMPUTE_SERVERS_REF']}/detail\"\n instances_rq = request(\n method=\"GET\", url=url, headers=build_header(), params={\"vm_state\": \"active\"},\n )\n\n if not instances_rq.ok:\n HTTPError(instances_rq.status_code)\n\n answer = {\"servers\": list()}\n for instance in instances_rq.json()[\"servers\"]:\n instance_info = dict(name=instance[\"name\"])\n instance_info[\"ip_addresses\"] = list()\n for network, info in instance[\"addresses\"].items():\n instance_info[\"ip_addresses\"].extend(entry[\"addr\"] for entry in info)\n answer[\"servers\"].append(instance_info)\n\n return answer", "def test_aws_service_api_regions_get(self):\n pass", "def get_all_instances(self, instance_ids=None, filters=None):\r\n params = {}\r\n if instance_ids:\r\n self.build_list_params(params, instance_ids, 'InstanceId')\r\n if filters:\r\n if 'group-id' in filters:\r\n warnings.warn(\"The group-id filter now requires a security \"\r\n \"group identifier (sg-*) instead of a group \"\r\n \"name. To filter by group name use the \"\r\n \"'group-name' filter instead.\", UserWarning)\r\n self.build_filter_params(params, filters)\r\n return self.get_list('DescribeInstances', params,\r\n [('item', Reservation)], verb='POST')", "def get_ec2_instances(client):\n reservations = client.describe_instances().get(\"Reservations\")\n instances = list(map(lambda x: x.get(\"Instances\"), reservations))\n instances = list(itertools.chain.from_iterable(instances))\n return list(map(lambda x: {\n 'name': next((t['Value'] for t in x.get('Tags', []) if t.get('Key') == 'Name'), 'Unknown'),\n 'id': x.get('InstanceId'),\n 'state': x.get('State'),\n }, instances))", "def list_instances_by_tag(tag_key, tag_value):\n instances = EC2_MANAGER.list_instances_by_tag(tag_key, tag_value)\n\n print(str_sep)\n print(\"Listing EC2 instances from [{}] region with tag [{}:{}].\"\n .format(SESSION.region_name, tag_key, tag_value))\n print(\"{:20s}{:15s}{:10s}{}\".format(\"ID\", \"TYPE\", \"STATE\", \"NAME\"))\n print(str_sep)\n\n for reservations in instances['Reservations']:\n for instance in reservations['Instances']:\n name = next((item for item in instance['Tags'] if item[\"Key\"] == \"Name\"),\n {'Key': 'Name', 'Value': 'None'})\n\n print(\"{:20s}{:15s}{:10s}{}\".format(instance['InstanceId'],\n instance['InstanceType'],\n instance['State']['Name'],\n name['Value']))\n\n print(str_sep)", "def get_instance_list():\n return parse_list_output(Popen('nova list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])", "def get_address(project, zone, instance):\n return gcloud(\n project,\n 'addresses',\n 'describe',\n '%s-ip' % instance,\n '--region=%s' % get_region(zone),\n '--format=value(address)',\n )", "def _get_available_regions():\n session = boto3.session.Session()\n\n return session.get_available_regions(service_name='s3')", "def _getAllRunningInstances(self):\n return self._ec2.get_only_instances(filters={\n 'tag:leader_instance_id': self._instanceId,\n 'instance-state-name': 'running'})", "def instances(self, alwaysIncludeEmail=None, maxAttendees=None,\r\n maxResults=None, originalStart=None, pageToken=None,\r\n showDeleted=None, timeZone=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/instances'.format(self.get_url())\r\n request = http.Request('GET', url, params)\r\n\r\n return request, parsers.parse_json", "def instances(self):\r\n # It would be more efficient to do this with 
filters now\r\n # but not all services that implement EC2 API support filters.\r\n instances = []\r\n rs = self.connection.get_all_instances()\r\n for reservation in rs:\r\n uses_group = [g.name for g in reservation.groups if g.name == self.name]\r\n if uses_group:\r\n instances.extend(reservation.instances)\r\n return instances", "def __get_reservations(self, instance_ids=None):\n if instance_ids:\n self.__validate_instance_id(instance_ids)\n euca_conn = self.__make_connection()\n try:\n return euca_conn.get_all_instances(instance_ids)\n except:\n euca.display_error_and_exit('%s' % ex)\n return False", "def gce_instance_filter(self) -> 'outputs.GceInstanceFilterResponse':\n return pulumi.get(self, \"gce_instance_filter\")", "def get_region_index(self, region, namespace, **filters):\n filters['namespace'] = namespace\n return self.get_resource('data/wow/region/index', region, **filters)", "def get_paginator(cls, instance, page=1, item_count=None, items_per_page=50,\n db_session=None,\n filter_params=None, **kwargs):\n if filter_params is None:\n filter_params = {}\n query = cls.get_latest_entries(instance, only_active=True, limit=None)\n return SqlalchemyOrmPage(query, page=page, item_count=item_count,\n items_per_page=items_per_page,\n **kwargs)", "def list_as_instances(access_key, secret_key, region, autoscaling_group):\n\n aws_as = init_aws_as_conn(access_key, secret_key, region)\n aws_ec2 = init_aws_ec2_conn(access_key, secret_key, region)\n autoscaling_instances = []\n\n vm = aws_as.get_all_groups([autoscaling_group])\n autoscaling_instances_id = [j.instance_id for i in vm for j in i.instances]\n\n for instance_id in autoscaling_instances_id:\n vm = boto.ec2.instance.Instance(aws_ec2)\n vm.id = instance_id\n vm.update()\n autoscaling_instances.append(vm)\n\n return autoscaling_instances", "def GetInstanceInfo(self, instance, static=None, reason=None):\n query = []\n if static is not None:\n query.append((\"static\", static))\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_GET,\n (\"/%s/instances/%s/info\" %\n (GANETI_RAPI_VERSION, instance)), query, None)", "def instances(self):\n from office365.outlook.calendar.events.collection import EventCollection\n return self.properties.get('instances',\n EventCollection(self.context, ResourcePath(\"instances\", self.resource_path)))", "def list(self, instance=None, limit=20, marker=0):\n if instance is None:\n return super(CloudDatabaseBackupManager, self).list()\n return self.api._manager._list_backups_for_instance(instance, limit=limit,\n marker=marker)", "def show_all_instances(self):\n if not self.all_instances:\n logging.error(\"%s: no instances detected\", self.name)\n return\n instances = \"\"\n for instance in self.all_instances:\n instances += \" - {0.name} (pid: {0.pid})\".format(instance)\n logging.info(\"arangod instances for starter: %s - %s\", self.name, instances)", "def get(self, region=None):\n base_url = self.base.base_url[region]\n url = '{}/lol/status/v3/shard-data'.format(base_url)\n r = requests.get(url, headers=self.base.headers)\n return r", "def get_images(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_images = conn.get_all_images(owners=['self'])\n except boto.exception.EC2ResponseError:\n return []\n return region_images", "def list_ebss_by_instance():\n\n ec2 = u.create_ec2_resource()\n instances = [(u.seconds_from_datetime(i.launch_time), i) for i in ec2.instances.all()]\n sorted_instances = sorted(instances, key=itemgetter(0))\n\n for (seconds, instance) 
in sorted_instances:\n\n volumes = instance.volumes.all()\n volume_strs = []\n for v in volumes:\n volume_strs.append(\"%s (%s)\"%(v.id, v.size))\n print(\"%s: %s\" % (u.get_name(instance.tags), ','.join(volume_strs)))", "def list(self, filters: dict = None, state: str = None, exclude: str = None) -> list:\n date_format = '%Y-%m-%d %H:%M:%S'\n self.instances = self.ec2.instances.all()\n\n # TOREMOVE\n def __all_instances():\n # all instances without filtering\n self.instances = [\n {\n 'InstanceId': instance.id,\n 'State': instance.state['Name'],\n 'Type': instance.instance_type,\n 'VpcId': instance.vpc_id,\n 'KeyName': instance.key_name,\n 'Tags': instance.tags,\n 'StartedAt': instance.launch_time.strftime(date_format)\n }\n for instance in self.instances\n ]\n\n if state:\n try:\n self.instances = self.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': [state]}])\n except IOError as e:\n raise EC2Error('Error listing instances by state {0} {1}'.format(state, e))\n\n if filters:\n # convert string into dict\n filters = literal_eval(filters)\n try:\n if not self.instances:\n self.instances = self.ec2.instances.all()\n\n self.instances = self.instances.filter(Filters=[{'Name': filters['Name'], 'Values': filters['Values']}])\n except IOError as e:\n raise EC2Error('Error listing instances with filters {0} {1}'.format(filters, e))\n\n if exclude:\n instances = []\n for i in self.instances:\n if i.id not in exclude:\n instances.append(i)\n return [\n {\n 'InstanceId': instance.id,\n 'State': instance.state['Name'],\n 'Type': instance.instance_type,\n 'VpcId': instance.vpc_id,\n 'KeyName': instance.key_name,\n 'Tags': instance.tags,\n 'StartedAt': instance.launch_time.strftime(date_format)\n }\n for instance in instances\n ]\n else:\n return [\n {\n 'InstanceId': instance.id,\n 'State': instance.state['Name'],\n 'Type': instance.instance_type,\n 'VpcId': instance.vpc_id,\n 'KeyName': instance.key_name,\n 'Tags': instance.tags,\n 'StartedAt': instance.launch_time.strftime(date_format)\n }\n for instance in self.instances\n ]", "def get(profile):\n client = boto3client.get(\"iam\", profile)\n return client.list_instance_profiles()", "def ls(region_name=DEFAULT_REGION):\n s3conn = s3.connect_to_region(region_name)\n buckets = s3conn.get_all_buckets()\n for bucket in buckets:\n print(bucket.name)", "def get_info(self, instance):\n shutdown_staues = ['deallocating', 'deallocated',\n 'stopping', 'stopped']\n instance_id = instance.uuid\n state = power_state.NOSTATE\n status = 'Unkown'\n try:\n vm = self.compute.virtual_machines.get(\n CONF.azure.resource_group, instance_id, expand='instanceView')\n # azure may raise msrestazure.azure_exceptions CloudError\n except exception.CloudError as e:\n msg = six.text_type(e)\n if 'ResourceNotFound' in msg:\n raise nova_ex.InstanceNotFound(instance_id=instance.uuid)\n else:\n LOG.exception(msg)\n ex = exception.InstanceGetFailure(reason=six.text_type(e),\n instance_uuid=instance_id)\n raise ex\n except Exception as e:\n msg = six.text_type(e)\n LOG.exception(msg)\n ex = exception.InstanceGetFailure(reason=six.text_type(e),\n instance_uuid=instance_id)\n raise ex\n else:\n LOG.debug('vm info is: {}'.format(vm))\n if vm and hasattr(vm, 'instance_view') and \\\n hasattr(vm.instance_view, 'statuses') and \\\n vm.instance_view.statuses is not None:\n for i in vm.instance_view.statuses:\n if hasattr(i, 'code') and \\\n i.code and 'PowerState' in i.code:\n status = i.code.split('/')[-1]\n if 'running' == status:\n state = power_state.RUNNING\n elif 
status in shutdown_staues:\n state = power_state.SHUTDOWN\n break\n LOG.info(_LI('vm: %(instance_id)s state is : %(status)s'),\n dict(instance_id=instance_id, status=status))\n return InstanceInfo(state=state, id=instance_id)", "def details(profile, instance_profile):\n client = boto3client.get(\"iam\", profile)\n params = {}\n params[\"InstanceProfileName\"] = instance_profile\n return client.get_instance_profile(**params)", "def region(cls):\n return cls.REGION", "def region(self):\n return self._get(\"region\")", "def do_instance_list(cs, args):\n instances = cs.instances.list()\n\n fields = [\"OCCI ID\"]\n if args.detailed:\n fields.extend([\"Name\", \"State\", \"Network\"])\n occi_attrs = (\"occi.compute.hostname\",\n \"occi.compute.state\")\n\n pt = prettytable.PrettyTable([f for f in fields], caching=False)\n pt.align = 'l'\n\n for instance in instances:\n row = []\n attrs = instance.get('attributes', {})\n instance_id = attrs.get('occi.core.id', None)\n row.append(instance_id)\n\n if args.detailed and instance_id:\n if not all([i in attrs for i in occi_attrs]):\n instance = cs.instances.detail(instance_id)\n attrs = instance.get('attributes', {})\n\n name = attrs.get(\"occi.core.title\", None)\n if name is None:\n name = attrs.get(\"occi.compute.hostname\", None)\n row.append(name)\n row.append(attrs.get(\"occi.compute.state\", None))\n\n links = instance.get(\"links\", [])\n network = []\n for link in links:\n if occi.CATEGORIES[\"network\"] in link[\"kind\"][\"related\"]:\n # get IPv4\n ip = link[\"attributes\"].get(\n \"occi.networkinterface.address\",\n None\n )\n if not ip:\n ip = link[\"attributes\"].get(\n \"occi.networkinterface.ip6\",\n None\n )\n network.append(ip)\n row.append(network)\n\n pt.add_row(row)\n\n print(pt.get_string())", "def list_elb(region, filter_by_kwargs):\n conn = boto.ec2.elb.connect_to_region(region)\n instances = conn.get_all_load_balancers()\n return lookup(instances, filter_by=filter_by_kwargs)", "def do_instance_show(cs, args):\n try:\n instance = cs.instances.detail(args.instance)\n except exceptions.NotFound as e:\n msg = \"No server with an id of '%s' exists\" % args.instance\n e.message = msg\n raise\n\n _print_server_details(instance)", "def getImages(region):\n creds = credentials()\n try:\n conn = ec2.connect_to_region(region, **creds)\n images = conn.get_all_images(owners=['self'])\n except boto.exception.EC2ResponseError:\n return []\n return images", "def describe_instances(self, instance_ids = None):\n response = instance.describe_instances(self.url, self.verb,\n self.headers, self.version, instance_ids)\n if response is not None :\n res = DescribeInstancesResponse.DescribeInstancesResponse()\n parseString(str(response.text), res)\n return res\n else :\n return None", "def GetInstances(self, bulk=False, reason=None):\n query = []\n _AppendIf(query, bulk, (\"bulk\", 1))\n _AppendReason(query, reason)\n\n instances = self._SendRequest(HTTP_GET,\n \"/%s/instances\" % GANETI_RAPI_VERSION,\n query, None)\n if bulk:\n return instances\n else:\n return [i[\"id\"] for i in instances]", "def get_instance_status(module, ecs, zone_id=None, page_number=None, page_size=None):\n changed = False\n try:\n result = ecs.get_instance_status(zone_id=zone_id, page_number=page_number, page_size=page_size)\n\n if 'error' in (''.join(str(result))).lower():\n module.fail_json(msg=result)\n changed = True\n\n except ECSResponseError as e:\n module.fail_json(msg='Unable to get status of instance(s), error: {0}'.format(e)) \n\n return changed, result", "def 
get_instances(instance_ids):\n\n instances = dict()\n conn = connect_to_region(REGION, aws_access_key_id=KEY_ID, aws_secret_access_key=ACCESS_KEY)\n try:\n reservations = conn.get_all_instances(instance_ids)\n except EC2ResponseError, ex:\n print 'Got exception when calling EC2 for instances (%s): %s' % \\\n (\", \".join(instance_ids), ex.error_message)\n return instances\n\n for r in reservations:\n if len(r.instances) and r.instances[0].id in instance_ids:\n instances[r.instances[0].id] = r.instances[0].tags[\"Name\"]\n\n return instances", "def instances(self):\n return self.get('instances')", "def list_instances(self):\n nodes = self._driver.list_nodes()\n return [[n.name, n.state, n.public_ips] for n in nodes]", "def list_notebook_instances(NextToken=None, MaxResults=None, SortBy=None, SortOrder=None, NameContains=None, CreationTimeBefore=None, CreationTimeAfter=None, LastModifiedTimeBefore=None, LastModifiedTimeAfter=None, StatusEquals=None, NotebookInstanceLifecycleConfigNameContains=None, DefaultCodeRepositoryContains=None, AdditionalCodeRepositoryEquals=None):\n pass", "def _get_instance_profile(role_name, iam_client=None, region_name=None):\n if not iam_client:\n iam_client = boto3.client('iam', region_name=region_name)\n try:\n resp = iam_client.get_instance_profile(\n InstanceProfileName=role_name)\n instance_profile_arn = resp['InstanceProfile']['Arn']\n except botocore.exceptions.ClientError as err:\n instance_profile_arn = None\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'NoSuchEntity':\n raise\n return instance_profile_arn", "def instances(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstanceStatusArgs']]]]:\n return pulumi.get(self, \"instances\")", "def _get_running_ec2_instances(theargs):\n mapstr = ''\n if theargs.profile is not None:\n boto3.setup_default_session(profile_name=theargs.profile)\n ec2 = boto3.client('ec2', region_name='us-west-2')\n\n response = ec2.describe_regions()\n for region in response['Regions']:\n rname = region['RegionName']\n sys.stdout.write('Running ec2 query in region: ' + rname + '\\n')\n ec2 = boto3.client('ec2', region_name=rname)\n mapstr += 'Region: ' + rname + '\\n'\n respy = ec2.describe_instances()\n for reso in respy['Reservations']:\n for entry in reso['Instances']:\n namey = ''\n try:\n for keyval in entry['Tags']:\n if keyval['Key'] == 'Name':\n namey = keyval['Value']\n break\n except KeyError:\n pass\n\n mapstr += ('\\t\\t' + entry['PublicDnsName'] + '\\n' +\n '\\t\\tLaunch Date: ' + str(entry['LaunchTime']) +\n '\\n' + \n '\\t\\tId: ' + entry['InstanceId'] + '\\n' +\n '\\t\\tType: ' + entry['InstanceType'] + '\\n' +\n '\\t\\tName: ' + namey + '\\n' +\n '\\t\\tState: ' + entry['State']['Name'] + '\\n\\n')\n sys.stdout.write('\\nResults:\\n\\n')\n return mapstr", "def instances(request): #pylint: disable=unused-argument\n response = list()\n # Retrieve filtered parameter on GET. 
It's used to display all instances or just user instances\n # In all cases, if user is not superuser, only user instances are displayed\n if 'filtered' in request.GET:\n filtered = ast.literal_eval(request.GET['filtered'])\n else:\n # By default, we filter based on user_id\n filtered = True\n # By default, we just list user instances, not all instances\n if not request.user.is_superuser:\n # If user is superuser and user are requesting admin view of instances\n # We ask a full list of instances\n filtered = True\n\n # We retrieve data from backend\n data_instances = BACKEND.instances(request, filtered)\n data_users = BACKEND.users()\n data_projects = BACKEND.projects()\n\n # We merge user and project information w/ instances\n for data_instance in data_instances:\n try:\n project = \"{name}\".format(\n **data_projects[data_instances[data_instance]['project_id']])\n except KeyError:\n project = data_instances[data_instance]['project_id']\n\n try:\n user = \"{name}\".format(\n **data_users[data_instances[data_instance]['user_id']])\n except KeyError:\n user = data_instances[data_instance]['user_id']\n response.append({\n 'id': data_instances[data_instance]['id'],\n 'name': data_instances[data_instance]['name'],\n 'created_at': data_instances[data_instance]['created_at'],\n 'lease_end': data_instances[data_instance]['lease_end'],\n 'project': project,\n 'user': user\n })\n return JsonResponse(response, safe=False)", "def get_availability_zones(self, context, filters=None, fields=None,\n sorts=None, limit=None, marker=None,\n page_reverse=False):", "def getregion(self, *args, **kwargs):\n return _image.image_getregion(self, *args, **kwargs)", "def GetInstance(\n self,\n instance_name: str,\n resource_group_name: Optional[str] = None) -> 'AZComputeVirtualMachine':\n instances = self.ListInstances(resource_group_name=resource_group_name)\n if instance_name not in instances:\n raise errors.ResourceNotFoundError(\n 'Instance {0:s} was not found in subscription {1:s}'.format(\n instance_name, self.az_account.subscription_id), __name__)\n return instances[instance_name]", "def get_ec2_reservations(profile, running_filter):\n try:\n ec2_client = boto3.Session(profile_name=profile).client('ec2')\n except ProfileNotFound:\n print(\"Profile: %s not found\" % profile, file=sys.stderr)\n sys.exit(1)\n filtered_instances = ec2_client.describe_instances(Filters=running_filter)\n return filtered_instances['Reservations']", "def List(self, zone):\n project = properties.VALUES.core.project.Get(required=True)\n request = self.messages.ComputeInstancesListRequest(\n zone=zone, project=project)\n instances = list_pager.YieldFromList(\n service=self.client.instances,\n request=request,\n method='List',\n field='items')\n\n result_set = []\n for instance in instances:\n if self._VMCreatedByExecGroup(instance):\n result_set.append(instance)\n\n return result_set" ]
[ "0.65359414", "0.64602095", "0.6369204", "0.6290484", "0.6248728", "0.5925351", "0.59036493", "0.564685", "0.5643462", "0.5618888", "0.5610908", "0.56100076", "0.55792296", "0.55599517", "0.5556868", "0.5544195", "0.55412877", "0.5532845", "0.5486521", "0.5459931", "0.5451159", "0.544322", "0.5440652", "0.54132307", "0.53653216", "0.5346481", "0.534229", "0.53411555", "0.5339474", "0.5339135", "0.53342795", "0.53285027", "0.5323785", "0.53142667", "0.53093743", "0.5290159", "0.5285819", "0.5267814", "0.5257835", "0.52556616", "0.5246127", "0.5244615", "0.5242059", "0.5233834", "0.5220303", "0.51756674", "0.5161309", "0.5160739", "0.51529986", "0.5126975", "0.5126699", "0.5113106", "0.510567", "0.5098133", "0.5090952", "0.5088127", "0.5083395", "0.5074973", "0.5065279", "0.5059674", "0.50435746", "0.5037295", "0.50339186", "0.5014508", "0.5009371", "0.5008984", "0.50083697", "0.4999327", "0.4989854", "0.49891996", "0.498695", "0.4980958", "0.4977251", "0.49634483", "0.4961415", "0.49455416", "0.494482", "0.4939827", "0.4932846", "0.49224705", "0.49208096", "0.49165648", "0.4915834", "0.49112868", "0.49100685", "0.4910008", "0.4906191", "0.4905006", "0.49010554", "0.48983964", "0.48955312", "0.48891973", "0.48798817", "0.4878187", "0.48771825", "0.48743075", "0.48736978", "0.48693976", "0.48624396", "0.4844482", "0.48442042" ]
0.0
-1
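The record above wraps the DescribeDBInstancesOverview action in a generated SDK method. A minimal usage sketch follows, assuming the method lives on the generated DDS client (alibabacloud_dds20151201); the credentials, endpoint, and region ID are hypothetical placeholders, not values taken from the dataset. Leaving InstanceIds unset asks for the overview of every instance in the region, and the action has no paging parameters.

from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_dds20151201.client import Client
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util import models as util_models

# Hypothetical client configuration; replace the placeholders with real values.
config = open_api_models.Config(
    access_key_id='<access-key-id>',
    access_key_secret='<access-key-secret>',
)
config.endpoint = 'mongodb.aliyuncs.com'  # assumed central endpoint for ApsaraDB for MongoDB
client = Client(config)

# InstanceIds is left unset, so the overview of all instances in the region
# under this account is returned in a single, unpaged response.
request = dds_20151201_models.DescribeDBInstancesOverviewRequest(
    region_id='cn-hangzhou',  # placeholder region ID
)
runtime = util_models.RuntimeOptions()
response = client.describe_dbinstances_overview_with_options(request, runtime)
print(response.body)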
If you do not specify an instance when you call this operation, the overview information of all instances in the specified region within this account is returned. Paged queries are not supported for this operation.
async def describe_dbinstances_overview_with_options_async(
    self,
    request: dds_20151201_models.DescribeDBInstancesOverviewRequest,
    runtime: util_models.RuntimeOptions,
) -> dds_20151201_models.DescribeDBInstancesOverviewResponse:
    UtilClient.validate_model(request)
    query = {}
    if not UtilClient.is_unset(request.charge_type):
        query['ChargeType'] = request.charge_type
    if not UtilClient.is_unset(request.engine_version):
        query['EngineVersion'] = request.engine_version
    if not UtilClient.is_unset(request.instance_class):
        query['InstanceClass'] = request.instance_class
    if not UtilClient.is_unset(request.instance_ids):
        query['InstanceIds'] = request.instance_ids
    if not UtilClient.is_unset(request.instance_status):
        query['InstanceStatus'] = request.instance_status
    if not UtilClient.is_unset(request.instance_type):
        query['InstanceType'] = request.instance_type
    if not UtilClient.is_unset(request.network_type):
        query['NetworkType'] = request.network_type
    if not UtilClient.is_unset(request.owner_account):
        query['OwnerAccount'] = request.owner_account
    if not UtilClient.is_unset(request.owner_id):
        query['OwnerId'] = request.owner_id
    if not UtilClient.is_unset(request.region_id):
        query['RegionId'] = request.region_id
    if not UtilClient.is_unset(request.resource_group_id):
        query['ResourceGroupId'] = request.resource_group_id
    if not UtilClient.is_unset(request.resource_owner_account):
        query['ResourceOwnerAccount'] = request.resource_owner_account
    if not UtilClient.is_unset(request.resource_owner_id):
        query['ResourceOwnerId'] = request.resource_owner_id
    if not UtilClient.is_unset(request.security_token):
        query['SecurityToken'] = request.security_token
    if not UtilClient.is_unset(request.v_switch_id):
        query['VSwitchId'] = request.v_switch_id
    if not UtilClient.is_unset(request.vpc_id):
        query['VpcId'] = request.vpc_id
    if not UtilClient.is_unset(request.zone_id):
        query['ZoneId'] = request.zone_id
    req = open_api_models.OpenApiRequest(
        query=OpenApiUtilClient.query(query)
    )
    params = open_api_models.Params(
        action='DescribeDBInstancesOverview',
        version='2015-12-01',
        protocol='HTTPS',
        pathname='/',
        method='POST',
        auth_type='AK',
        style='RPC',
        req_body_type='formData',
        body_type='json'
    )
    return TeaCore.from_map(
        dds_20151201_models.DescribeDBInstancesOverviewResponse(),
        await self.call_api_async(params, req, runtime)
    )
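The document field above is the coroutine counterpart of the same action. A short sketch of driving it with asyncio, under the same assumptions as the synchronous example (generated client class, placeholder credentials, endpoint, and region ID):

import asyncio

from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_dds20151201.client import Client
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util import models as util_models


async def main():
    # Hypothetical configuration; replace the placeholders with real values.
    config = open_api_models.Config(
        access_key_id='<access-key-id>',
        access_key_secret='<access-key-secret>',
    )
    config.endpoint = 'mongodb.aliyuncs.com'  # assumed endpoint
    client = Client(config)

    request = dds_20151201_models.DescribeDBInstancesOverviewRequest(
        region_id='cn-hangzhou',  # placeholder region ID
    )
    runtime = util_models.RuntimeOptions()
    # The *_async variant awaits call_api_async instead of blocking on call_api.
    response = await client.describe_dbinstances_overview_with_options_async(request, runtime)
    print(response.body)


asyncio.run(main())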
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_instances(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_instances = []\n reservations = conn.get_all_reservations()\n for reservation in reservations:\n for instance in reservation.instances:\n region_instances.append(instance)\n except boto.exception.EC2ResponseError:\n return []\n return region_instances", "def list_instances():\n print(str_sep)\n print(\"Listing EC2 instances from [{}] region.\".format(SESSION.region_name))\n print(\"{:20s}{:15s}{:10s}{}\".format(\"ID\", \"TYPE\", \"STATE\", \"NAME\"))\n print(str_sep)\n\n try:\n for instance in EC2_MANAGER.list_instances():\n # get the instance name in the tags list\n name = next((item for item in instance.tags if item[\"Key\"] == \"Name\"),\n {'Key': 'Name', 'Value': 'None'})\n\n print(\"{:20s}{:15s}{:10s}{}\".format(instance.id,\n instance.instance_type,\n instance.state['Name'],\n name['Value']))\n except ClientError as e:\n ErrManager.err_manager(e)\n\n print(str_sep)", "def list_instances(self):\n print '# AWS EC2 instances'\n self.compute.list_instances()", "def search(self, q=\"\", version=\"\", instance=\"\", page=1):\n\n access_token = self._get_access_token()\n payload = {\n 'q': q,\n 'version': version,\n 'instance': instance,\n 'page[number]': page,\n 'page[size]': 20,\n }\n headers = {'Authorization': \"Bearer \"+access_token}\n response = requests.get(self.api_endpoint, params=payload, headers=headers)\n\n if response.status_code == 401:\n raise AuthError('not authorized')\n elif response.status_code == 404:\n raise NotFoundError('entry not found')\n elif response.status_code != 200:\n raise Error('HTTP error')\n\n try:\n obj = response.json()\n except (ValueError, KeyError):\n raise Error(\"couldn't decode json\")\n\n return obj", "def listInstancesRegionZone(region,zone):\n\tprint \"-\"*80\n\tprint \"# Region :\",region,\" Zone\", zone\t\n\tprint \"-\"*80\n\tinstances = getInstancesRegionZone(region,zone)\n\tif instances:\n\t\tfor instance in instances:\n\t\t\tprint \"[\",instance.ami_launch_index,\"]\",instance.ip_address,\" (\",instance.private_ip_address,\") \",instance.instance_type,\" key=\",instance.key_name", "def describe_instances():\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Describe instances\n instances = ec2_resource.instances.all()\n for instance in instances:\n print('State of the instance \"' + instance.id + '\" is: \"' + instance.state['Name'] + '\"')\n return", "def _get_instances(instance_tags, region):\n return ec2_conn[region].get_all_instances(filters={\"tag:Name\": instance_tags})", "def list_instances(self):\n\n response = self.client.service.instances().aggregatedList(\n project=self.client.project_id).execute()\n\n zones = response.get('items', {})\n instances = []\n for zone in zones.values():\n for instance in zone.get('instances', []):\n instances.append(instance)\n\n return instances", "def list_ec2(region, filter_by_kwargs):\n conn = boto.ec2.connect_to_region(region)\n instances = conn.get_only_instances()\n return lookup(instances, filter_by=filter_by_kwargs)", "def list_instances():\n if request.method == \"GET\":\n return render_template(\"instances.html\")", "def to_representation(self, instance):\n\n return instance.region", "def list_instances(self):\n instances = []\n try:\n pages = self.compute.virtual_machines.list(\n CONF.azure.resource_group)\n except Exception as e:\n msg = six.text_type(e)\n LOG.exception(msg)\n ex = 
exception.InstanceListFailure(reason=six.text_type(e))\n raise ex\n else:\n if pages:\n for i in pages:\n instances.append(i.name)\n return instances", "def get_all_vpc_instances ( ec2_conn, vpc ) :\n return ec2_conn.get_only_instances( filters = { \"vpc-id\" : vpc.id } )", "def show_instances():\n return get_instances()", "def list_aws_instances(verbose=False, state='all'):\n conn = get_ec2_connection()\n\n reservations = conn.get_all_reservations()\n instances = []\n for res in reservations:\n for instance in res.instances:\n if state == 'all' or instance.state == state:\n instance = {\n 'id': instance.id,\n 'type': instance.instance_type,\n 'image': instance.image_id,\n 'state': instance.state,\n 'instance': instance,\n }\n instances.append(instance)\n env.instances = instances\n if verbose:\n import pprint\n pprint.pprint(env.instances)", "def list_running_instances(self):\n print '# Running AWS EC2 instances'\n self.compute.list_running_instances()", "def yield_instances(self, instance_filter=None):\n if instance_filter and set(\"\\\"\\\\'\").intersection(instance_filter):\n raise ValueError('Invalid instance filter: %s' % instance_filter)\n page_token = None\n while True:\n params = {'maxResults': 250}\n if instance_filter:\n params['filter'] = 'name eq \"%s\"' % instance_filter\n if page_token:\n params['pageToken'] = page_token\n resp = self.call_api('/aggregated/instances', params=params, deadline=120)\n items = resp.get('items', {})\n for zone in sorted(items):\n for instance in items[zone].get('instances', []):\n yield instance\n page_token = resp.get('nextPageToken')\n if not page_token:\n break", "def instances(self, endpoint_name=None):\n if endpoint_name is None:\n _, body = self.request('/v1.1/endpoint/instances', 'GET')\n else:\n _, body = self.request('/v1.1/endpoints/%s/instances' % endpoint_name, 'GET')\n return body", "def get_instances_in_instance_group(\n self, name, zone, max_results=None, page_token=None):\n params = {}\n if max_results:\n params['maxResults'] = max_results\n if page_token:\n params['pageToken'] = page_token\n return self.call_api(\n '/zones/%s/instanceGroups/%s/listInstances' % (zone, name),\n method='POST',\n params=params,\n )", "def instances(self, **query):\n return self._list(_instance.Instance, **query)", "def list_instances(self):\n # list instances\n self._list_instances()", "def instances(self):\n if \"instances\" in self._prop_dict:\n return InstancesCollectionPage(self._prop_dict[\"instances\"])\n else:\n return None", "def yield_instances_in_zone(self, zone, instance_filter=None):\n if instance_filter and set(\"\\\"\\\\'\").intersection(instance_filter):\n raise ValueError('Invalid instance filter: %s' % instance_filter)\n page_token = None\n while True:\n params = {'maxResults': 250}\n if instance_filter:\n params['filter'] = 'name eq \"%s\"' % instance_filter\n if page_token:\n params['pageToken'] = page_token\n try:\n resp = self.call_api(\n '/zones/%s/instances' % zone, params=params, deadline=120)\n except net.Error as exc:\n if not page_token and exc.status_code == 400:\n return # no such zone, this is fine...\n raise\n for instance in resp.get('items', []):\n yield instance\n page_token = resp.get('nextPageToken')\n if not page_token:\n break", "def list(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n print(vmrun.listSnapshots())", "def list_(args):\n\n # Get Config.py\n cloud = 
get_current_cloud(args.cloud)\n\n instances = cloud.list_instances()\n print_table(print_instance_summary, headers, instances,\n use_color=args.color)\n return instances", "def describe_instance(instance_id):\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n try:\n # Describe an instance\n instance = ec2_resource.Instance(instance_id)\n print('\\nInstance Id: ' + instance_id)\n print('Instance Id: ' + instance.id)\n print('Image Id: ' + instance.image_id)\n print('Instance Type: ' + instance.instance_type)\n print('State: ' + instance.state['Name'])\n if instance.state['Name'] == 'running':\n print('Private DNS Name: ' + instance.private_dns_name)\n print('Private IP: ' + instance.private_ip_address)\n print('Public DNS Name: ' + instance.public_dns_name)\n print('Public IP: ' + instance.public_ip_address)\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"MissingParameter\":\n print(\"Error: Missing instance id!!\")\n else:\n raise\n return", "def list_instances_detail(self):\n\n # TODO(imsplitbit): need to ask around if this is the best way to do\n # this. This causes some redundant vzlist commands as get_info is run\n # on every item returned from this command but it didn't make sense\n # to re-implement get_info as get_info_all.\n infos = []\n try:\n # get a list of CT names which will be nova friendly.\n # NOTE: This can be an issue if nova decides to change\n # the format of names. We would need to have a migration process\n # to change the names in the name field of the CTs.\n out, err = utils.execute('sudo', 'vzlist', '--all', '-o',\n 'name', '-H')\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Problem listing Vzs')\n\n for name in out.splitlines():\n name = name.split()[0]\n status = self.get_info(name)\n infos.append(driver.InstanceInfo(name, status['state']))\n\n return infos", "def aws_get_instances_by_name(region, name, raw=True):\n return _aws_get_instance_by_tag(region, name, 'tag:Name', raw)", "def load_instances_page(self):\n logging.info(\"loading instances page {}\".format(self.horizon_instances_url))\n\n return self._load_page_measure_time(self.driver, self.horizon_instances_url,\n tag = \"Instances Page\")", "def show_instances(cls, args, config):\n instance_list = config.get_all_instances()\n if len(instance_list) > 0:\n table_data = []\n for i in instance_list:\n provider_obj = config.get_object_by_id(i.provider_id, 'Provider')\n if provider_obj is None:\n continue\n provider_name = provider_obj.name\n #print \"provider_obj.type\",provider_obj.type\n if i.worker_group_id is not None:\n name = config.get_object_by_id(i.worker_id, 'WorkerGroup').name\n itype = 'worker'\n else:\n name = config.get_object_by_id(i.controller_id, 'Controller').name\n itype = 'controller'\n table_data.append([i.id, provider_name, i.provider_instance_identifier, itype, name])\n table_print(['ID', 'provider', 'instance id', 'type', 'name'], table_data)\n else:\n print \"No instance found\"", "def describe_rds_instances(rds, account, region, output_bucket):\n rds_list = rds.describe_db_instances().get('DBInstances')\n\n for rds_obj in rds_list:\n #print rds_obj\n output_bucket.append(misc.format_line((\n misc.check_if(account.get('name')),\n misc.check_if(region.get('RegionName')),\n misc.check_if(rds_obj.get('DBSubnetGroup').get('VpcId')),\n misc.check_if(rds_obj.get('DBInstanceIdentifier')),\n misc.check_if(rds_obj.get('DBInstanceClass')),\n 
misc.check_if(str(rds_obj.get('PubliclyAccessible'))),\n misc.check_if(rds_obj.get('Endpoint').get('Address')),\n misc.lookup(rds_obj.get('Endpoint').get('Address')),\n misc.check_if(str(rds_obj.get('Endpoint').get('Port')))\n )))", "def list_instances():\n js = _get_jetstream_conn()\n il = js.compute.instances.list()\n if not il:\n msg = \"You don't have any instances available.\"\n else:\n msg = (\"You have {0} instances available. Here are up to 3 most \"\n \"recent: \".format(len(il)))\n msg_ex = \"\"\n content = \"\"\n for i in il[:3]:\n msg_ex += \"{0},\".format(i.name)\n content += \"{0} ({1})\\n\".format(\n i.name, i.public_ips[0] if i.public_ips else i.private_ips[0])\n return statement(msg + msg_ex).simple_card(title=msg, content=content)", "def name(self):\n return self._client.project_name + '/instances/' + self.instance_id", "def GetInstance(self, instance, reason=None):\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_GET,\n (\"/%s/instances/%s\" %\n (GANETI_RAPI_VERSION, instance)), query, None)", "def Get(self, instance_name, zone):\n project = properties.VALUES.core.project.Get(required=True)\n request = self.messages.ComputeInstancesGetRequest(\n zone=zone, project=project, instance=instance_name)\n instance = self.client.instances.Get(request)\n if self._VMCreatedByExecGroup(instance):\n return instance\n raise HttpNotFoundError(\n 'Instance:{} not found'.format(instance_name), None, None)", "def get_all_reservations(config):\n reservations = []\n region_list = regions(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for region in region_list:\n _logger.info(\"Searching %s\", region)\n cnx = region.connect(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for reservation in cnx.get_all_instances():\n _logger.info(\"Found %s %s\", reservation,\n [str(i.id) for i in reservation.instances])\n reservations.append(reservation)\n return reservations", "def aws_get_instances_by_id(region, instance_id, raw=True):\n client = boto3.session.Session().client('ec2', region)\n try:\n matching_reservations = client.describe_instances(InstanceIds=[instance_id]).get('Reservations', [])\n except ClientError as exc:\n if exc.response.get('Error', {}).get('Code') != 'InvalidInstanceID.NotFound':\n raise\n return []\n instances = []\n [[instances.append(_aws_instance_from_dict(region, instance, raw)) # pylint: disable=expression-not-assigned\n for instance in reservation.get('Instances')] for reservation in matching_reservations if reservation]\n return instances", "def query_region(self, region_name=None, region_id=None):\n\n if region_name is not None and region_id is None:\n query = \"select * from region where region_name='{}'\".format(\n region_name)\n try:\n self.dbCursor.execute(query)\n return self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)\n elif region_name is None and region_id is not None:\n query = \"select * from region where region_id='{}'\".format(\n region_id)\n try:\n self.dbCursor.execute(query)\n return self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)", "def running_instances(self, args):\n message = MessageClass()\n region = args[\"Region\"]\n\n # Boto3 resource creation by providing the access_id and access_secret\n ec2 = boto3.resource(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n 
aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n instances = ec2.instances.filter(\n Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])\n for instance in instances:\n attachment = MessageAttachmentsClass()\n attachment.title = instance.id\n message.attach(attachment)\n\n button = MessageButtonsClass()\n button.text = \"Stop Instance\"\n button.value = \"Stop Instance\"\n button.name = \"Stop Instance\"\n button.command = {\"service_application\": self.yellowant_integration_id,\n \"function_name\": \"stop-instance\",\n \"data\": {\"Instance-ID\": instance.id, \"Region\": region}}\n attachment.attach_button(button)\n\n message.message_text = \"Instances Running are:\"\n return message.to_json()", "def api_get_regions():\n db_session = DBSession()\n\n rows = []\n criteria = '%'\n if request.args and request.args.get('q'):\n criteria += request.args.get('q') + '%'\n else:\n criteria += '%'\n\n regions = db_session.query(Region).filter(Region.name.like(criteria)).order_by(Region.name.asc()).all()\n if len(regions) > 0:\n if request.args.get('show_all'):\n rows.append({'id': 0, 'text': 'ALL'})\n for region in regions:\n rows.append({'id': region.id, 'text': region.name})\n\n return jsonify(**{'data': rows})", "def nfvi_get_instances(paging, callback, context=None):\n cmd_id = _compute_plugin.invoke_plugin('get_instances', paging, context,\n callback=callback)\n return cmd_id", "def show_instance(name, session=None, call=None):\n if call == \"function\":\n raise SaltCloudException(\n \"The show_instnce function must be called with -a or --action.\"\n )\n log.debug(\"show_instance-> name: %s session: %s\", name, session)\n if session is None:\n session = _get_session()\n vm = _get_vm(name, session=session)\n record = session.xenapi.VM.get_record(vm)\n if not record[\"is_a_template\"] and not record[\"is_control_domain\"]:\n try:\n base_template_name = record[\"other_config\"][\"base_template_name\"]\n except Exception: # pylint: disable=broad-except\n base_template_name = None\n log.debug(\n \"VM %s, does not have base_template_name attribute\",\n record[\"name_label\"],\n )\n ret = {\n \"id\": record[\"uuid\"],\n \"image\": base_template_name,\n \"name\": record[\"name_label\"],\n \"size\": record[\"memory_dynamic_max\"],\n \"state\": record[\"power_state\"],\n \"private_ips\": get_vm_ip(name, session),\n \"public_ips\": None,\n }\n\n __utils__[\"cloud.cache_node\"](ret, _get_active_provider_name(), __opts__)\n return ret", "def main(self, _):\n all_addresses = find_addresses.probe_regions()\n\n print(\"\")\n if not all_addresses:\n print(\"No namespace elastic IP addresses found.\")\n\n for region in consts.REGIONS:\n region_addresses = [address for address in all_addresses\n if address['region'] == region]\n if not region_addresses:\n continue\n\n print(f\"{region}: {len(region_addresses)} address(es) found:\")\n for address in region_addresses:\n if 'instance_name' in address:\n print(f\" {address['ip']} ({address['instance_name']})\")\n elif 'association_id' in address:\n print(f\" {address['ip']} (unknown association)\")\n else:\n print(f\" {address['ip']} (not associated)\")", "def list_addresses(self, region):\n assert is_valid_region(region), region\n page_token = None\n while True:\n params = {'maxResults': 250}\n if page_token:\n params['pageToken'] = page_token\n resp = self.call_api(\n '/regions/%s/addresses' % region, params=params, deadline=120)\n for addr in resp.get('items', []):\n yield addr\n page_token = 
resp.get('nextPageToken')\n if not page_token:\n break", "def terminate_instance(self):\n # connect to ec2\n try:\n ec2_region = [r for r in boto.ec2.regions() if r.name == self._region][0]\n except indexerror:\n print >> sys.stderr, 'unknown region: %s' % self._region\n exit(2)\n ec2_connection = ec2_region.connect()\n\n #import code; code.interact(local=locals())\n instances = reduce(list.__add__, [reservation.instances for reservation in ec2_connection.get_all_instances()])\n name_matches = [i for i in instances\n if i.tags.get('Name', None) == self._instance_name and i.state == 'running']\n\n if (not name_matches):\n raise ValueError('No instance found with name %s' % self._instance_name)\n elif len(name_matches) > 1:\n raise ValueError('Multiple instances found with name %s' % self._instance_name)\n\n instance = name_matches[0]\n\n ec2_connection.terminate_instances(instance_ids=[instance.id])", "def show_instance(name, call=None):\n if call != \"action\":\n raise SaltCloudSystemExit(\n \"The show_instance action must be called with -a or --action.\"\n )\n\n nodes = list_nodes_full()\n __utils__[\"cloud.cache_node\"](nodes[name], _get_active_provider_name(), __opts__)\n return nodes[name]", "def find_instances(\n instance_id=None,\n name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n return_objs=False,\n in_states=None,\n filters=None,\n):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n try:\n filter_parameters = {\"filters\": {}}\n\n if instance_id:\n filter_parameters[\"instance_ids\"] = [instance_id]\n\n if name:\n filter_parameters[\"filters\"][\"tag:Name\"] = name\n\n if tags:\n for tag_name, tag_value in tags.items():\n filter_parameters[\"filters\"][\"tag:{}\".format(tag_name)] = tag_value\n\n if filters:\n filter_parameters[\"filters\"].update(filters)\n\n reservations = conn.get_all_reservations(**filter_parameters)\n instances = [i for r in reservations for i in r.instances]\n log.debug(\n \"The filters criteria %s matched the following instances:%s\",\n filter_parameters,\n instances,\n )\n\n if in_states:\n instances = [i for i in instances if i.state in in_states]\n log.debug(\n \"Limiting instance matches to those in the requested states: %s\",\n instances,\n )\n if instances:\n if return_objs:\n return instances\n return [instance.id for instance in instances]\n else:\n return []\n except boto.exception.BotoServerError as exc:\n log.error(exc)\n return []", "def get_snapshots(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_snapshots = conn.get_all_snapshots(owner='self')\n except boto.exception.EC2ResponseError:\n return []\n return region_snapshots", "def detail_running_instance(self):\n\n instance_id = self._choose_among_running_instances()\n\n # Exit option\n if not instance_id:\n print 'Operation cancelled'\n return\n\n # Print the details\n print '# Details of the \"%s\" instance' % instance_id\n self.compute.detail_running_instance(instance_id)", "def _aws_get_instance_by_tag(region, name, tag, raw):\n client = boto3.session.Session().client('ec2', region)\n matching_reservations = client.describe_instances(Filters=[{'Name': tag, 'Values': [name]}]).get('Reservations', [])\n instances = []\n [[instances.append(_aws_instance_from_dict(region, instance, raw)) # pylint: disable=expression-not-assigned\n for instance in reservation.get('Instances')] for reservation in matching_reservations if reservation]\n return instances", "def instance_view(self) -> 
'outputs.DedicatedHostInstanceViewResponse':\n return pulumi.get(self, \"instance_view\")", "def get_instances() -> dict:\n url = f\"{app.config['COMPUTE_SERVERS_REF']}/detail\"\n instances_rq = request(\n method=\"GET\", url=url, headers=build_header(), params={\"vm_state\": \"active\"},\n )\n\n if not instances_rq.ok:\n HTTPError(instances_rq.status_code)\n\n answer = {\"servers\": list()}\n for instance in instances_rq.json()[\"servers\"]:\n instance_info = dict(name=instance[\"name\"])\n instance_info[\"ip_addresses\"] = list()\n for network, info in instance[\"addresses\"].items():\n instance_info[\"ip_addresses\"].extend(entry[\"addr\"] for entry in info)\n answer[\"servers\"].append(instance_info)\n\n return answer", "def test_aws_service_api_regions_get(self):\n pass", "def get_all_instances(self, instance_ids=None, filters=None):\r\n params = {}\r\n if instance_ids:\r\n self.build_list_params(params, instance_ids, 'InstanceId')\r\n if filters:\r\n if 'group-id' in filters:\r\n warnings.warn(\"The group-id filter now requires a security \"\r\n \"group identifier (sg-*) instead of a group \"\r\n \"name. To filter by group name use the \"\r\n \"'group-name' filter instead.\", UserWarning)\r\n self.build_filter_params(params, filters)\r\n return self.get_list('DescribeInstances', params,\r\n [('item', Reservation)], verb='POST')", "def get_ec2_instances(client):\n reservations = client.describe_instances().get(\"Reservations\")\n instances = list(map(lambda x: x.get(\"Instances\"), reservations))\n instances = list(itertools.chain.from_iterable(instances))\n return list(map(lambda x: {\n 'name': next((t['Value'] for t in x.get('Tags', []) if t.get('Key') == 'Name'), 'Unknown'),\n 'id': x.get('InstanceId'),\n 'state': x.get('State'),\n }, instances))", "def list_instances_by_tag(tag_key, tag_value):\n instances = EC2_MANAGER.list_instances_by_tag(tag_key, tag_value)\n\n print(str_sep)\n print(\"Listing EC2 instances from [{}] region with tag [{}:{}].\"\n .format(SESSION.region_name, tag_key, tag_value))\n print(\"{:20s}{:15s}{:10s}{}\".format(\"ID\", \"TYPE\", \"STATE\", \"NAME\"))\n print(str_sep)\n\n for reservations in instances['Reservations']:\n for instance in reservations['Instances']:\n name = next((item for item in instance['Tags'] if item[\"Key\"] == \"Name\"),\n {'Key': 'Name', 'Value': 'None'})\n\n print(\"{:20s}{:15s}{:10s}{}\".format(instance['InstanceId'],\n instance['InstanceType'],\n instance['State']['Name'],\n name['Value']))\n\n print(str_sep)", "def get_instance_list():\n return parse_list_output(Popen('nova list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])", "def get_address(project, zone, instance):\n return gcloud(\n project,\n 'addresses',\n 'describe',\n '%s-ip' % instance,\n '--region=%s' % get_region(zone),\n '--format=value(address)',\n )", "def _get_available_regions():\n session = boto3.session.Session()\n\n return session.get_available_regions(service_name='s3')", "def _getAllRunningInstances(self):\n return self._ec2.get_only_instances(filters={\n 'tag:leader_instance_id': self._instanceId,\n 'instance-state-name': 'running'})", "def instances(self, alwaysIncludeEmail=None, maxAttendees=None,\r\n maxResults=None, originalStart=None, pageToken=None,\r\n showDeleted=None, timeZone=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/instances'.format(self.get_url())\r\n request = http.Request('GET', url, params)\r\n\r\n return request, parsers.parse_json", "def instances(self):\r\n # It would be more efficient to 
do this with filters now\r\n # but not all services that implement EC2 API support filters.\r\n instances = []\r\n rs = self.connection.get_all_instances()\r\n for reservation in rs:\r\n uses_group = [g.name for g in reservation.groups if g.name == self.name]\r\n if uses_group:\r\n instances.extend(reservation.instances)\r\n return instances", "def __get_reservations(self, instance_ids=None):\n if instance_ids:\n self.__validate_instance_id(instance_ids)\n euca_conn = self.__make_connection()\n try:\n return euca_conn.get_all_instances(instance_ids)\n except:\n euca.display_error_and_exit('%s' % ex)\n return False", "def gce_instance_filter(self) -> 'outputs.GceInstanceFilterResponse':\n return pulumi.get(self, \"gce_instance_filter\")", "def get_region_index(self, region, namespace, **filters):\n filters['namespace'] = namespace\n return self.get_resource('data/wow/region/index', region, **filters)", "def get_paginator(cls, instance, page=1, item_count=None, items_per_page=50,\n db_session=None,\n filter_params=None, **kwargs):\n if filter_params is None:\n filter_params = {}\n query = cls.get_latest_entries(instance, only_active=True, limit=None)\n return SqlalchemyOrmPage(query, page=page, item_count=item_count,\n items_per_page=items_per_page,\n **kwargs)", "def list_as_instances(access_key, secret_key, region, autoscaling_group):\n\n aws_as = init_aws_as_conn(access_key, secret_key, region)\n aws_ec2 = init_aws_ec2_conn(access_key, secret_key, region)\n autoscaling_instances = []\n\n vm = aws_as.get_all_groups([autoscaling_group])\n autoscaling_instances_id = [j.instance_id for i in vm for j in i.instances]\n\n for instance_id in autoscaling_instances_id:\n vm = boto.ec2.instance.Instance(aws_ec2)\n vm.id = instance_id\n vm.update()\n autoscaling_instances.append(vm)\n\n return autoscaling_instances", "def GetInstanceInfo(self, instance, static=None, reason=None):\n query = []\n if static is not None:\n query.append((\"static\", static))\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_GET,\n (\"/%s/instances/%s/info\" %\n (GANETI_RAPI_VERSION, instance)), query, None)", "def list(self, instance=None, limit=20, marker=0):\n if instance is None:\n return super(CloudDatabaseBackupManager, self).list()\n return self.api._manager._list_backups_for_instance(instance, limit=limit,\n marker=marker)", "def instances(self):\n from office365.outlook.calendar.events.collection import EventCollection\n return self.properties.get('instances',\n EventCollection(self.context, ResourcePath(\"instances\", self.resource_path)))", "def show_all_instances(self):\n if not self.all_instances:\n logging.error(\"%s: no instances detected\", self.name)\n return\n instances = \"\"\n for instance in self.all_instances:\n instances += \" - {0.name} (pid: {0.pid})\".format(instance)\n logging.info(\"arangod instances for starter: %s - %s\", self.name, instances)", "def get(self, region=None):\n base_url = self.base.base_url[region]\n url = '{}/lol/status/v3/shard-data'.format(base_url)\n r = requests.get(url, headers=self.base.headers)\n return r", "def get_images(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_images = conn.get_all_images(owners=['self'])\n except boto.exception.EC2ResponseError:\n return []\n return region_images", "def list_ebss_by_instance():\n\n ec2 = u.create_ec2_resource()\n instances = [(u.seconds_from_datetime(i.launch_time), i) for i in ec2.instances.all()]\n sorted_instances = sorted(instances, key=itemgetter(0))\n\n for 
(seconds, instance) in sorted_instances:\n\n volumes = instance.volumes.all()\n volume_strs = []\n for v in volumes:\n volume_strs.append(\"%s (%s)\"%(v.id, v.size))\n print(\"%s: %s\" % (u.get_name(instance.tags), ','.join(volume_strs)))", "def list(self, filters: dict = None, state: str = None, exclude: str = None) -> list:\n date_format = '%Y-%m-%d %H:%M:%S'\n self.instances = self.ec2.instances.all()\n\n # TOREMOVE\n def __all_instances():\n # all instances without filtering\n self.instances = [\n {\n 'InstanceId': instance.id,\n 'State': instance.state['Name'],\n 'Type': instance.instance_type,\n 'VpcId': instance.vpc_id,\n 'KeyName': instance.key_name,\n 'Tags': instance.tags,\n 'StartedAt': instance.launch_time.strftime(date_format)\n }\n for instance in self.instances\n ]\n\n if state:\n try:\n self.instances = self.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': [state]}])\n except IOError as e:\n raise EC2Error('Error listing instances by state {0} {1}'.format(state, e))\n\n if filters:\n # convert string into dict\n filters = literal_eval(filters)\n try:\n if not self.instances:\n self.instances = self.ec2.instances.all()\n\n self.instances = self.instances.filter(Filters=[{'Name': filters['Name'], 'Values': filters['Values']}])\n except IOError as e:\n raise EC2Error('Error listing instances with filters {0} {1}'.format(filters, e))\n\n if exclude:\n instances = []\n for i in self.instances:\n if i.id not in exclude:\n instances.append(i)\n return [\n {\n 'InstanceId': instance.id,\n 'State': instance.state['Name'],\n 'Type': instance.instance_type,\n 'VpcId': instance.vpc_id,\n 'KeyName': instance.key_name,\n 'Tags': instance.tags,\n 'StartedAt': instance.launch_time.strftime(date_format)\n }\n for instance in instances\n ]\n else:\n return [\n {\n 'InstanceId': instance.id,\n 'State': instance.state['Name'],\n 'Type': instance.instance_type,\n 'VpcId': instance.vpc_id,\n 'KeyName': instance.key_name,\n 'Tags': instance.tags,\n 'StartedAt': instance.launch_time.strftime(date_format)\n }\n for instance in self.instances\n ]", "def get(profile):\n client = boto3client.get(\"iam\", profile)\n return client.list_instance_profiles()", "def ls(region_name=DEFAULT_REGION):\n s3conn = s3.connect_to_region(region_name)\n buckets = s3conn.get_all_buckets()\n for bucket in buckets:\n print(bucket.name)", "def get_info(self, instance):\n shutdown_staues = ['deallocating', 'deallocated',\n 'stopping', 'stopped']\n instance_id = instance.uuid\n state = power_state.NOSTATE\n status = 'Unkown'\n try:\n vm = self.compute.virtual_machines.get(\n CONF.azure.resource_group, instance_id, expand='instanceView')\n # azure may raise msrestazure.azure_exceptions CloudError\n except exception.CloudError as e:\n msg = six.text_type(e)\n if 'ResourceNotFound' in msg:\n raise nova_ex.InstanceNotFound(instance_id=instance.uuid)\n else:\n LOG.exception(msg)\n ex = exception.InstanceGetFailure(reason=six.text_type(e),\n instance_uuid=instance_id)\n raise ex\n except Exception as e:\n msg = six.text_type(e)\n LOG.exception(msg)\n ex = exception.InstanceGetFailure(reason=six.text_type(e),\n instance_uuid=instance_id)\n raise ex\n else:\n LOG.debug('vm info is: {}'.format(vm))\n if vm and hasattr(vm, 'instance_view') and \\\n hasattr(vm.instance_view, 'statuses') and \\\n vm.instance_view.statuses is not None:\n for i in vm.instance_view.statuses:\n if hasattr(i, 'code') and \\\n i.code and 'PowerState' in i.code:\n status = i.code.split('/')[-1]\n if 'running' == status:\n state = 
power_state.RUNNING\n elif status in shutdown_staues:\n state = power_state.SHUTDOWN\n break\n LOG.info(_LI('vm: %(instance_id)s state is : %(status)s'),\n dict(instance_id=instance_id, status=status))\n return InstanceInfo(state=state, id=instance_id)", "def details(profile, instance_profile):\n client = boto3client.get(\"iam\", profile)\n params = {}\n params[\"InstanceProfileName\"] = instance_profile\n return client.get_instance_profile(**params)", "def region(cls):\n return cls.REGION", "def region(self):\n return self._get(\"region\")", "def do_instance_list(cs, args):\n instances = cs.instances.list()\n\n fields = [\"OCCI ID\"]\n if args.detailed:\n fields.extend([\"Name\", \"State\", \"Network\"])\n occi_attrs = (\"occi.compute.hostname\",\n \"occi.compute.state\")\n\n pt = prettytable.PrettyTable([f for f in fields], caching=False)\n pt.align = 'l'\n\n for instance in instances:\n row = []\n attrs = instance.get('attributes', {})\n instance_id = attrs.get('occi.core.id', None)\n row.append(instance_id)\n\n if args.detailed and instance_id:\n if not all([i in attrs for i in occi_attrs]):\n instance = cs.instances.detail(instance_id)\n attrs = instance.get('attributes', {})\n\n name = attrs.get(\"occi.core.title\", None)\n if name is None:\n name = attrs.get(\"occi.compute.hostname\", None)\n row.append(name)\n row.append(attrs.get(\"occi.compute.state\", None))\n\n links = instance.get(\"links\", [])\n network = []\n for link in links:\n if occi.CATEGORIES[\"network\"] in link[\"kind\"][\"related\"]:\n # get IPv4\n ip = link[\"attributes\"].get(\n \"occi.networkinterface.address\",\n None\n )\n if not ip:\n ip = link[\"attributes\"].get(\n \"occi.networkinterface.ip6\",\n None\n )\n network.append(ip)\n row.append(network)\n\n pt.add_row(row)\n\n print(pt.get_string())", "def list_elb(region, filter_by_kwargs):\n conn = boto.ec2.elb.connect_to_region(region)\n instances = conn.get_all_load_balancers()\n return lookup(instances, filter_by=filter_by_kwargs)", "def getImages(region):\n creds = credentials()\n try:\n conn = ec2.connect_to_region(region, **creds)\n images = conn.get_all_images(owners=['self'])\n except boto.exception.EC2ResponseError:\n return []\n return images", "def do_instance_show(cs, args):\n try:\n instance = cs.instances.detail(args.instance)\n except exceptions.NotFound as e:\n msg = \"No server with an id of '%s' exists\" % args.instance\n e.message = msg\n raise\n\n _print_server_details(instance)", "def describe_instances(self, instance_ids = None):\n response = instance.describe_instances(self.url, self.verb,\n self.headers, self.version, instance_ids)\n if response is not None :\n res = DescribeInstancesResponse.DescribeInstancesResponse()\n parseString(str(response.text), res)\n return res\n else :\n return None", "def GetInstances(self, bulk=False, reason=None):\n query = []\n _AppendIf(query, bulk, (\"bulk\", 1))\n _AppendReason(query, reason)\n\n instances = self._SendRequest(HTTP_GET,\n \"/%s/instances\" % GANETI_RAPI_VERSION,\n query, None)\n if bulk:\n return instances\n else:\n return [i[\"id\"] for i in instances]", "def get_instance_status(module, ecs, zone_id=None, page_number=None, page_size=None):\n changed = False\n try:\n result = ecs.get_instance_status(zone_id=zone_id, page_number=page_number, page_size=page_size)\n\n if 'error' in (''.join(str(result))).lower():\n module.fail_json(msg=result)\n changed = True\n\n except ECSResponseError as e:\n module.fail_json(msg='Unable to get status of instance(s), error: {0}'.format(e)) \n\n return 
changed, result", "def get_instances(instance_ids):\n\n instances = dict()\n conn = connect_to_region(REGION, aws_access_key_id=KEY_ID, aws_secret_access_key=ACCESS_KEY)\n try:\n reservations = conn.get_all_instances(instance_ids)\n except EC2ResponseError, ex:\n print 'Got exception when calling EC2 for instances (%s): %s' % \\\n (\", \".join(instance_ids), ex.error_message)\n return instances\n\n for r in reservations:\n if len(r.instances) and r.instances[0].id in instance_ids:\n instances[r.instances[0].id] = r.instances[0].tags[\"Name\"]\n\n return instances", "def instances(self):\n return self.get('instances')", "def list_instances(self):\n nodes = self._driver.list_nodes()\n return [[n.name, n.state, n.public_ips] for n in nodes]", "def list_notebook_instances(NextToken=None, MaxResults=None, SortBy=None, SortOrder=None, NameContains=None, CreationTimeBefore=None, CreationTimeAfter=None, LastModifiedTimeBefore=None, LastModifiedTimeAfter=None, StatusEquals=None, NotebookInstanceLifecycleConfigNameContains=None, DefaultCodeRepositoryContains=None, AdditionalCodeRepositoryEquals=None):\n pass", "def _get_instance_profile(role_name, iam_client=None, region_name=None):\n if not iam_client:\n iam_client = boto3.client('iam', region_name=region_name)\n try:\n resp = iam_client.get_instance_profile(\n InstanceProfileName=role_name)\n instance_profile_arn = resp['InstanceProfile']['Arn']\n except botocore.exceptions.ClientError as err:\n instance_profile_arn = None\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'NoSuchEntity':\n raise\n return instance_profile_arn", "def instances(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstanceStatusArgs']]]]:\n return pulumi.get(self, \"instances\")", "def _get_running_ec2_instances(theargs):\n mapstr = ''\n if theargs.profile is not None:\n boto3.setup_default_session(profile_name=theargs.profile)\n ec2 = boto3.client('ec2', region_name='us-west-2')\n\n response = ec2.describe_regions()\n for region in response['Regions']:\n rname = region['RegionName']\n sys.stdout.write('Running ec2 query in region: ' + rname + '\\n')\n ec2 = boto3.client('ec2', region_name=rname)\n mapstr += 'Region: ' + rname + '\\n'\n respy = ec2.describe_instances()\n for reso in respy['Reservations']:\n for entry in reso['Instances']:\n namey = ''\n try:\n for keyval in entry['Tags']:\n if keyval['Key'] == 'Name':\n namey = keyval['Value']\n break\n except KeyError:\n pass\n\n mapstr += ('\\t\\t' + entry['PublicDnsName'] + '\\n' +\n '\\t\\tLaunch Date: ' + str(entry['LaunchTime']) +\n '\\n' + \n '\\t\\tId: ' + entry['InstanceId'] + '\\n' +\n '\\t\\tType: ' + entry['InstanceType'] + '\\n' +\n '\\t\\tName: ' + namey + '\\n' +\n '\\t\\tState: ' + entry['State']['Name'] + '\\n\\n')\n sys.stdout.write('\\nResults:\\n\\n')\n return mapstr", "def instances(request): #pylint: disable=unused-argument\n response = list()\n # Retrieve filtered parameter on GET. 
It's used to display all instances or just user instances\n # In all cases, if user is not superuser, only user instances are displayed\n if 'filtered' in request.GET:\n filtered = ast.literal_eval(request.GET['filtered'])\n else:\n # By default, we filter based on user_id\n filtered = True\n # By default, we just list user instances, not all instances\n if not request.user.is_superuser:\n # If user is superuser and user are requesting admin view of instances\n # We ask a full list of instances\n filtered = True\n\n # We retrieve data from backend\n data_instances = BACKEND.instances(request, filtered)\n data_users = BACKEND.users()\n data_projects = BACKEND.projects()\n\n # We merge user and project information w/ instances\n for data_instance in data_instances:\n try:\n project = \"{name}\".format(\n **data_projects[data_instances[data_instance]['project_id']])\n except KeyError:\n project = data_instances[data_instance]['project_id']\n\n try:\n user = \"{name}\".format(\n **data_users[data_instances[data_instance]['user_id']])\n except KeyError:\n user = data_instances[data_instance]['user_id']\n response.append({\n 'id': data_instances[data_instance]['id'],\n 'name': data_instances[data_instance]['name'],\n 'created_at': data_instances[data_instance]['created_at'],\n 'lease_end': data_instances[data_instance]['lease_end'],\n 'project': project,\n 'user': user\n })\n return JsonResponse(response, safe=False)", "def get_availability_zones(self, context, filters=None, fields=None,\n sorts=None, limit=None, marker=None,\n page_reverse=False):", "def getregion(self, *args, **kwargs):\n return _image.image_getregion(self, *args, **kwargs)", "def GetInstance(\n self,\n instance_name: str,\n resource_group_name: Optional[str] = None) -> 'AZComputeVirtualMachine':\n instances = self.ListInstances(resource_group_name=resource_group_name)\n if instance_name not in instances:\n raise errors.ResourceNotFoundError(\n 'Instance {0:s} was not found in subscription {1:s}'.format(\n instance_name, self.az_account.subscription_id), __name__)\n return instances[instance_name]", "def List(self, zone):\n project = properties.VALUES.core.project.Get(required=True)\n request = self.messages.ComputeInstancesListRequest(\n zone=zone, project=project)\n instances = list_pager.YieldFromList(\n service=self.client.instances,\n request=request,\n method='List',\n field='items')\n\n result_set = []\n for instance in instances:\n if self._VMCreatedByExecGroup(instance):\n result_set.append(instance)\n\n return result_set", "def get_ec2_reservations(profile, running_filter):\n try:\n ec2_client = boto3.Session(profile_name=profile).client('ec2')\n except ProfileNotFound:\n print(\"Profile: %s not found\" % profile, file=sys.stderr)\n sys.exit(1)\n filtered_instances = ec2_client.describe_instances(Filters=running_filter)\n return filtered_instances['Reservations']" ]
[ "0.6536427", "0.64603454", "0.6369071", "0.628964", "0.62482893", "0.5924644", "0.59029126", "0.5645817", "0.56441754", "0.56185406", "0.56104", "0.5609651", "0.5578235", "0.555862", "0.55563", "0.5543599", "0.55416054", "0.5532806", "0.5486836", "0.5459136", "0.545084", "0.54427016", "0.54418004", "0.54130167", "0.53649247", "0.5344637", "0.534187", "0.5340681", "0.53389406", "0.5337647", "0.5335273", "0.5327897", "0.5322926", "0.5311984", "0.53091383", "0.5289881", "0.5285853", "0.52681345", "0.5258371", "0.5255853", "0.5245078", "0.5244796", "0.52426845", "0.5236108", "0.5220221", "0.51752675", "0.51613045", "0.51610214", "0.5152351", "0.51267797", "0.5125479", "0.5112153", "0.51051885", "0.509809", "0.50909775", "0.5088697", "0.508239", "0.50735366", "0.5062942", "0.50579065", "0.5044014", "0.5036059", "0.5032743", "0.50136894", "0.5009891", "0.50091124", "0.5008766", "0.49965584", "0.49885496", "0.49884477", "0.49856573", "0.4980986", "0.49782607", "0.49617812", "0.49616072", "0.49439633", "0.49438655", "0.49377277", "0.49312493", "0.49219328", "0.49205652", "0.49170738", "0.49159774", "0.49112108", "0.49107635", "0.490991", "0.4907055", "0.49036297", "0.49003538", "0.489749", "0.48948932", "0.48883522", "0.48796153", "0.4876759", "0.4875955", "0.48741686", "0.48729518", "0.48687348", "0.486179", "0.48449168", "0.4843765" ]
0.0
-1
If you do not specify an instance when you call this operation, the overview information of all instances in the specified region within this account is returned. This operation does not support paged queries.
def describe_dbinstances_overview( self, request: dds_20151201_models.DescribeDBInstancesOverviewRequest, ) -> dds_20151201_models.DescribeDBInstancesOverviewResponse: runtime = util_models.RuntimeOptions() return self.describe_dbinstances_overview_with_options(request, runtime)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_instances(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_instances = []\n reservations = conn.get_all_reservations()\n for reservation in reservations:\n for instance in reservation.instances:\n region_instances.append(instance)\n except boto.exception.EC2ResponseError:\n return []\n return region_instances", "def list_instances():\n print(str_sep)\n print(\"Listing EC2 instances from [{}] region.\".format(SESSION.region_name))\n print(\"{:20s}{:15s}{:10s}{}\".format(\"ID\", \"TYPE\", \"STATE\", \"NAME\"))\n print(str_sep)\n\n try:\n for instance in EC2_MANAGER.list_instances():\n # get the instance name in the tags list\n name = next((item for item in instance.tags if item[\"Key\"] == \"Name\"),\n {'Key': 'Name', 'Value': 'None'})\n\n print(\"{:20s}{:15s}{:10s}{}\".format(instance.id,\n instance.instance_type,\n instance.state['Name'],\n name['Value']))\n except ClientError as e:\n ErrManager.err_manager(e)\n\n print(str_sep)", "def list_instances(self):\n print '# AWS EC2 instances'\n self.compute.list_instances()", "def search(self, q=\"\", version=\"\", instance=\"\", page=1):\n\n access_token = self._get_access_token()\n payload = {\n 'q': q,\n 'version': version,\n 'instance': instance,\n 'page[number]': page,\n 'page[size]': 20,\n }\n headers = {'Authorization': \"Bearer \"+access_token}\n response = requests.get(self.api_endpoint, params=payload, headers=headers)\n\n if response.status_code == 401:\n raise AuthError('not authorized')\n elif response.status_code == 404:\n raise NotFoundError('entry not found')\n elif response.status_code != 200:\n raise Error('HTTP error')\n\n try:\n obj = response.json()\n except (ValueError, KeyError):\n raise Error(\"couldn't decode json\")\n\n return obj", "def listInstancesRegionZone(region,zone):\n\tprint \"-\"*80\n\tprint \"# Region :\",region,\" Zone\", zone\t\n\tprint \"-\"*80\n\tinstances = getInstancesRegionZone(region,zone)\n\tif instances:\n\t\tfor instance in instances:\n\t\t\tprint \"[\",instance.ami_launch_index,\"]\",instance.ip_address,\" (\",instance.private_ip_address,\") \",instance.instance_type,\" key=\",instance.key_name", "def describe_instances():\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Describe instances\n instances = ec2_resource.instances.all()\n for instance in instances:\n print('State of the instance \"' + instance.id + '\" is: \"' + instance.state['Name'] + '\"')\n return", "def _get_instances(instance_tags, region):\n return ec2_conn[region].get_all_instances(filters={\"tag:Name\": instance_tags})", "def list_instances(self):\n\n response = self.client.service.instances().aggregatedList(\n project=self.client.project_id).execute()\n\n zones = response.get('items', {})\n instances = []\n for zone in zones.values():\n for instance in zone.get('instances', []):\n instances.append(instance)\n\n return instances", "def list_ec2(region, filter_by_kwargs):\n conn = boto.ec2.connect_to_region(region)\n instances = conn.get_only_instances()\n return lookup(instances, filter_by=filter_by_kwargs)", "def list_instances():\n if request.method == \"GET\":\n return render_template(\"instances.html\")", "def to_representation(self, instance):\n\n return instance.region", "def list_instances(self):\n instances = []\n try:\n pages = self.compute.virtual_machines.list(\n CONF.azure.resource_group)\n except Exception as e:\n msg = six.text_type(e)\n LOG.exception(msg)\n ex = 
exception.InstanceListFailure(reason=six.text_type(e))\n raise ex\n else:\n if pages:\n for i in pages:\n instances.append(i.name)\n return instances", "def get_all_vpc_instances ( ec2_conn, vpc ) :\n return ec2_conn.get_only_instances( filters = { \"vpc-id\" : vpc.id } )", "def show_instances():\n return get_instances()", "def list_aws_instances(verbose=False, state='all'):\n conn = get_ec2_connection()\n\n reservations = conn.get_all_reservations()\n instances = []\n for res in reservations:\n for instance in res.instances:\n if state == 'all' or instance.state == state:\n instance = {\n 'id': instance.id,\n 'type': instance.instance_type,\n 'image': instance.image_id,\n 'state': instance.state,\n 'instance': instance,\n }\n instances.append(instance)\n env.instances = instances\n if verbose:\n import pprint\n pprint.pprint(env.instances)", "def list_running_instances(self):\n print '# Running AWS EC2 instances'\n self.compute.list_running_instances()", "def yield_instances(self, instance_filter=None):\n if instance_filter and set(\"\\\"\\\\'\").intersection(instance_filter):\n raise ValueError('Invalid instance filter: %s' % instance_filter)\n page_token = None\n while True:\n params = {'maxResults': 250}\n if instance_filter:\n params['filter'] = 'name eq \"%s\"' % instance_filter\n if page_token:\n params['pageToken'] = page_token\n resp = self.call_api('/aggregated/instances', params=params, deadline=120)\n items = resp.get('items', {})\n for zone in sorted(items):\n for instance in items[zone].get('instances', []):\n yield instance\n page_token = resp.get('nextPageToken')\n if not page_token:\n break", "def instances(self, endpoint_name=None):\n if endpoint_name is None:\n _, body = self.request('/v1.1/endpoint/instances', 'GET')\n else:\n _, body = self.request('/v1.1/endpoints/%s/instances' % endpoint_name, 'GET')\n return body", "def get_instances_in_instance_group(\n self, name, zone, max_results=None, page_token=None):\n params = {}\n if max_results:\n params['maxResults'] = max_results\n if page_token:\n params['pageToken'] = page_token\n return self.call_api(\n '/zones/%s/instanceGroups/%s/listInstances' % (zone, name),\n method='POST',\n params=params,\n )", "def instances(self, **query):\n return self._list(_instance.Instance, **query)", "def list_instances(self):\n # list instances\n self._list_instances()", "def instances(self):\n if \"instances\" in self._prop_dict:\n return InstancesCollectionPage(self._prop_dict[\"instances\"])\n else:\n return None", "def yield_instances_in_zone(self, zone, instance_filter=None):\n if instance_filter and set(\"\\\"\\\\'\").intersection(instance_filter):\n raise ValueError('Invalid instance filter: %s' % instance_filter)\n page_token = None\n while True:\n params = {'maxResults': 250}\n if instance_filter:\n params['filter'] = 'name eq \"%s\"' % instance_filter\n if page_token:\n params['pageToken'] = page_token\n try:\n resp = self.call_api(\n '/zones/%s/instances' % zone, params=params, deadline=120)\n except net.Error as exc:\n if not page_token and exc.status_code == 400:\n return # no such zone, this is fine...\n raise\n for instance in resp.get('items', []):\n yield instance\n page_token = resp.get('nextPageToken')\n if not page_token:\n break", "def list(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n print(vmrun.listSnapshots())", "def list_(args):\n\n # Get Config.py\n cloud = 
get_current_cloud(args.cloud)\n\n instances = cloud.list_instances()\n print_table(print_instance_summary, headers, instances,\n use_color=args.color)\n return instances", "def describe_instance(instance_id):\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n try:\n # Describe an instance\n instance = ec2_resource.Instance(instance_id)\n print('\\nInstance Id: ' + instance_id)\n print('Instance Id: ' + instance.id)\n print('Image Id: ' + instance.image_id)\n print('Instance Type: ' + instance.instance_type)\n print('State: ' + instance.state['Name'])\n if instance.state['Name'] == 'running':\n print('Private DNS Name: ' + instance.private_dns_name)\n print('Private IP: ' + instance.private_ip_address)\n print('Public DNS Name: ' + instance.public_dns_name)\n print('Public IP: ' + instance.public_ip_address)\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"MissingParameter\":\n print(\"Error: Missing instance id!!\")\n else:\n raise\n return", "def list_instances_detail(self):\n\n # TODO(imsplitbit): need to ask around if this is the best way to do\n # this. This causes some redundant vzlist commands as get_info is run\n # on every item returned from this command but it didn't make sense\n # to re-implement get_info as get_info_all.\n infos = []\n try:\n # get a list of CT names which will be nova friendly.\n # NOTE: This can be an issue if nova decides to change\n # the format of names. We would need to have a migration process\n # to change the names in the name field of the CTs.\n out, err = utils.execute('sudo', 'vzlist', '--all', '-o',\n 'name', '-H')\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Problem listing Vzs')\n\n for name in out.splitlines():\n name = name.split()[0]\n status = self.get_info(name)\n infos.append(driver.InstanceInfo(name, status['state']))\n\n return infos", "def aws_get_instances_by_name(region, name, raw=True):\n return _aws_get_instance_by_tag(region, name, 'tag:Name', raw)", "def load_instances_page(self):\n logging.info(\"loading instances page {}\".format(self.horizon_instances_url))\n\n return self._load_page_measure_time(self.driver, self.horizon_instances_url,\n tag = \"Instances Page\")", "def show_instances(cls, args, config):\n instance_list = config.get_all_instances()\n if len(instance_list) > 0:\n table_data = []\n for i in instance_list:\n provider_obj = config.get_object_by_id(i.provider_id, 'Provider')\n if provider_obj is None:\n continue\n provider_name = provider_obj.name\n #print \"provider_obj.type\",provider_obj.type\n if i.worker_group_id is not None:\n name = config.get_object_by_id(i.worker_id, 'WorkerGroup').name\n itype = 'worker'\n else:\n name = config.get_object_by_id(i.controller_id, 'Controller').name\n itype = 'controller'\n table_data.append([i.id, provider_name, i.provider_instance_identifier, itype, name])\n table_print(['ID', 'provider', 'instance id', 'type', 'name'], table_data)\n else:\n print \"No instance found\"", "def describe_rds_instances(rds, account, region, output_bucket):\n rds_list = rds.describe_db_instances().get('DBInstances')\n\n for rds_obj in rds_list:\n #print rds_obj\n output_bucket.append(misc.format_line((\n misc.check_if(account.get('name')),\n misc.check_if(region.get('RegionName')),\n misc.check_if(rds_obj.get('DBSubnetGroup').get('VpcId')),\n misc.check_if(rds_obj.get('DBInstanceIdentifier')),\n misc.check_if(rds_obj.get('DBInstanceClass')),\n 
misc.check_if(str(rds_obj.get('PubliclyAccessible'))),\n misc.check_if(rds_obj.get('Endpoint').get('Address')),\n misc.lookup(rds_obj.get('Endpoint').get('Address')),\n misc.check_if(str(rds_obj.get('Endpoint').get('Port')))\n )))", "def list_instances():\n js = _get_jetstream_conn()\n il = js.compute.instances.list()\n if not il:\n msg = \"You don't have any instances available.\"\n else:\n msg = (\"You have {0} instances available. Here are up to 3 most \"\n \"recent: \".format(len(il)))\n msg_ex = \"\"\n content = \"\"\n for i in il[:3]:\n msg_ex += \"{0},\".format(i.name)\n content += \"{0} ({1})\\n\".format(\n i.name, i.public_ips[0] if i.public_ips else i.private_ips[0])\n return statement(msg + msg_ex).simple_card(title=msg, content=content)", "def name(self):\n return self._client.project_name + '/instances/' + self.instance_id", "def GetInstance(self, instance, reason=None):\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_GET,\n (\"/%s/instances/%s\" %\n (GANETI_RAPI_VERSION, instance)), query, None)", "def Get(self, instance_name, zone):\n project = properties.VALUES.core.project.Get(required=True)\n request = self.messages.ComputeInstancesGetRequest(\n zone=zone, project=project, instance=instance_name)\n instance = self.client.instances.Get(request)\n if self._VMCreatedByExecGroup(instance):\n return instance\n raise HttpNotFoundError(\n 'Instance:{} not found'.format(instance_name), None, None)", "def get_all_reservations(config):\n reservations = []\n region_list = regions(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for region in region_list:\n _logger.info(\"Searching %s\", region)\n cnx = region.connect(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for reservation in cnx.get_all_instances():\n _logger.info(\"Found %s %s\", reservation,\n [str(i.id) for i in reservation.instances])\n reservations.append(reservation)\n return reservations", "def aws_get_instances_by_id(region, instance_id, raw=True):\n client = boto3.session.Session().client('ec2', region)\n try:\n matching_reservations = client.describe_instances(InstanceIds=[instance_id]).get('Reservations', [])\n except ClientError as exc:\n if exc.response.get('Error', {}).get('Code') != 'InvalidInstanceID.NotFound':\n raise\n return []\n instances = []\n [[instances.append(_aws_instance_from_dict(region, instance, raw)) # pylint: disable=expression-not-assigned\n for instance in reservation.get('Instances')] for reservation in matching_reservations if reservation]\n return instances", "def query_region(self, region_name=None, region_id=None):\n\n if region_name is not None and region_id is None:\n query = \"select * from region where region_name='{}'\".format(\n region_name)\n try:\n self.dbCursor.execute(query)\n return self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)\n elif region_name is None and region_id is not None:\n query = \"select * from region where region_id='{}'\".format(\n region_id)\n try:\n self.dbCursor.execute(query)\n return self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)", "def running_instances(self, args):\n message = MessageClass()\n region = args[\"Region\"]\n\n # Boto3 resource creation by providing the access_id and access_secret\n ec2 = boto3.resource(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n 
aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n instances = ec2.instances.filter(\n Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])\n for instance in instances:\n attachment = MessageAttachmentsClass()\n attachment.title = instance.id\n message.attach(attachment)\n\n button = MessageButtonsClass()\n button.text = \"Stop Instance\"\n button.value = \"Stop Instance\"\n button.name = \"Stop Instance\"\n button.command = {\"service_application\": self.yellowant_integration_id,\n \"function_name\": \"stop-instance\",\n \"data\": {\"Instance-ID\": instance.id, \"Region\": region}}\n attachment.attach_button(button)\n\n message.message_text = \"Instances Running are:\"\n return message.to_json()", "def api_get_regions():\n db_session = DBSession()\n\n rows = []\n criteria = '%'\n if request.args and request.args.get('q'):\n criteria += request.args.get('q') + '%'\n else:\n criteria += '%'\n\n regions = db_session.query(Region).filter(Region.name.like(criteria)).order_by(Region.name.asc()).all()\n if len(regions) > 0:\n if request.args.get('show_all'):\n rows.append({'id': 0, 'text': 'ALL'})\n for region in regions:\n rows.append({'id': region.id, 'text': region.name})\n\n return jsonify(**{'data': rows})", "def show_instance(name, session=None, call=None):\n if call == \"function\":\n raise SaltCloudException(\n \"The show_instnce function must be called with -a or --action.\"\n )\n log.debug(\"show_instance-> name: %s session: %s\", name, session)\n if session is None:\n session = _get_session()\n vm = _get_vm(name, session=session)\n record = session.xenapi.VM.get_record(vm)\n if not record[\"is_a_template\"] and not record[\"is_control_domain\"]:\n try:\n base_template_name = record[\"other_config\"][\"base_template_name\"]\n except Exception: # pylint: disable=broad-except\n base_template_name = None\n log.debug(\n \"VM %s, does not have base_template_name attribute\",\n record[\"name_label\"],\n )\n ret = {\n \"id\": record[\"uuid\"],\n \"image\": base_template_name,\n \"name\": record[\"name_label\"],\n \"size\": record[\"memory_dynamic_max\"],\n \"state\": record[\"power_state\"],\n \"private_ips\": get_vm_ip(name, session),\n \"public_ips\": None,\n }\n\n __utils__[\"cloud.cache_node\"](ret, _get_active_provider_name(), __opts__)\n return ret", "def nfvi_get_instances(paging, callback, context=None):\n cmd_id = _compute_plugin.invoke_plugin('get_instances', paging, context,\n callback=callback)\n return cmd_id", "def main(self, _):\n all_addresses = find_addresses.probe_regions()\n\n print(\"\")\n if not all_addresses:\n print(\"No namespace elastic IP addresses found.\")\n\n for region in consts.REGIONS:\n region_addresses = [address for address in all_addresses\n if address['region'] == region]\n if not region_addresses:\n continue\n\n print(f\"{region}: {len(region_addresses)} address(es) found:\")\n for address in region_addresses:\n if 'instance_name' in address:\n print(f\" {address['ip']} ({address['instance_name']})\")\n elif 'association_id' in address:\n print(f\" {address['ip']} (unknown association)\")\n else:\n print(f\" {address['ip']} (not associated)\")", "def list_addresses(self, region):\n assert is_valid_region(region), region\n page_token = None\n while True:\n params = {'maxResults': 250}\n if page_token:\n params['pageToken'] = page_token\n resp = self.call_api(\n '/regions/%s/addresses' % region, params=params, deadline=120)\n for addr in resp.get('items', []):\n yield addr\n page_token = 
resp.get('nextPageToken')\n if not page_token:\n break", "def terminate_instance(self):\n # connect to ec2\n try:\n ec2_region = [r for r in boto.ec2.regions() if r.name == self._region][0]\n except indexerror:\n print >> sys.stderr, 'unknown region: %s' % self._region\n exit(2)\n ec2_connection = ec2_region.connect()\n\n #import code; code.interact(local=locals())\n instances = reduce(list.__add__, [reservation.instances for reservation in ec2_connection.get_all_instances()])\n name_matches = [i for i in instances\n if i.tags.get('Name', None) == self._instance_name and i.state == 'running']\n\n if (not name_matches):\n raise ValueError('No instance found with name %s' % self._instance_name)\n elif len(name_matches) > 1:\n raise ValueError('Multiple instances found with name %s' % self._instance_name)\n\n instance = name_matches[0]\n\n ec2_connection.terminate_instances(instance_ids=[instance.id])", "def show_instance(name, call=None):\n if call != \"action\":\n raise SaltCloudSystemExit(\n \"The show_instance action must be called with -a or --action.\"\n )\n\n nodes = list_nodes_full()\n __utils__[\"cloud.cache_node\"](nodes[name], _get_active_provider_name(), __opts__)\n return nodes[name]", "def get_snapshots(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_snapshots = conn.get_all_snapshots(owner='self')\n except boto.exception.EC2ResponseError:\n return []\n return region_snapshots", "def find_instances(\n instance_id=None,\n name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n return_objs=False,\n in_states=None,\n filters=None,\n):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n try:\n filter_parameters = {\"filters\": {}}\n\n if instance_id:\n filter_parameters[\"instance_ids\"] = [instance_id]\n\n if name:\n filter_parameters[\"filters\"][\"tag:Name\"] = name\n\n if tags:\n for tag_name, tag_value in tags.items():\n filter_parameters[\"filters\"][\"tag:{}\".format(tag_name)] = tag_value\n\n if filters:\n filter_parameters[\"filters\"].update(filters)\n\n reservations = conn.get_all_reservations(**filter_parameters)\n instances = [i for r in reservations for i in r.instances]\n log.debug(\n \"The filters criteria %s matched the following instances:%s\",\n filter_parameters,\n instances,\n )\n\n if in_states:\n instances = [i for i in instances if i.state in in_states]\n log.debug(\n \"Limiting instance matches to those in the requested states: %s\",\n instances,\n )\n if instances:\n if return_objs:\n return instances\n return [instance.id for instance in instances]\n else:\n return []\n except boto.exception.BotoServerError as exc:\n log.error(exc)\n return []", "def detail_running_instance(self):\n\n instance_id = self._choose_among_running_instances()\n\n # Exit option\n if not instance_id:\n print 'Operation cancelled'\n return\n\n # Print the details\n print '# Details of the \"%s\" instance' % instance_id\n self.compute.detail_running_instance(instance_id)", "def instance_view(self) -> 'outputs.DedicatedHostInstanceViewResponse':\n return pulumi.get(self, \"instance_view\")", "def _aws_get_instance_by_tag(region, name, tag, raw):\n client = boto3.session.Session().client('ec2', region)\n matching_reservations = client.describe_instances(Filters=[{'Name': tag, 'Values': [name]}]).get('Reservations', [])\n instances = []\n [[instances.append(_aws_instance_from_dict(region, instance, raw)) # pylint: disable=expression-not-assigned\n for instance in reservation.get('Instances')] 
for reservation in matching_reservations if reservation]\n return instances", "def get_instances() -> dict:\n url = f\"{app.config['COMPUTE_SERVERS_REF']}/detail\"\n instances_rq = request(\n method=\"GET\", url=url, headers=build_header(), params={\"vm_state\": \"active\"},\n )\n\n if not instances_rq.ok:\n HTTPError(instances_rq.status_code)\n\n answer = {\"servers\": list()}\n for instance in instances_rq.json()[\"servers\"]:\n instance_info = dict(name=instance[\"name\"])\n instance_info[\"ip_addresses\"] = list()\n for network, info in instance[\"addresses\"].items():\n instance_info[\"ip_addresses\"].extend(entry[\"addr\"] for entry in info)\n answer[\"servers\"].append(instance_info)\n\n return answer", "def test_aws_service_api_regions_get(self):\n pass", "def get_all_instances(self, instance_ids=None, filters=None):\r\n params = {}\r\n if instance_ids:\r\n self.build_list_params(params, instance_ids, 'InstanceId')\r\n if filters:\r\n if 'group-id' in filters:\r\n warnings.warn(\"The group-id filter now requires a security \"\r\n \"group identifier (sg-*) instead of a group \"\r\n \"name. To filter by group name use the \"\r\n \"'group-name' filter instead.\", UserWarning)\r\n self.build_filter_params(params, filters)\r\n return self.get_list('DescribeInstances', params,\r\n [('item', Reservation)], verb='POST')", "def get_ec2_instances(client):\n reservations = client.describe_instances().get(\"Reservations\")\n instances = list(map(lambda x: x.get(\"Instances\"), reservations))\n instances = list(itertools.chain.from_iterable(instances))\n return list(map(lambda x: {\n 'name': next((t['Value'] for t in x.get('Tags', []) if t.get('Key') == 'Name'), 'Unknown'),\n 'id': x.get('InstanceId'),\n 'state': x.get('State'),\n }, instances))", "def list_instances_by_tag(tag_key, tag_value):\n instances = EC2_MANAGER.list_instances_by_tag(tag_key, tag_value)\n\n print(str_sep)\n print(\"Listing EC2 instances from [{}] region with tag [{}:{}].\"\n .format(SESSION.region_name, tag_key, tag_value))\n print(\"{:20s}{:15s}{:10s}{}\".format(\"ID\", \"TYPE\", \"STATE\", \"NAME\"))\n print(str_sep)\n\n for reservations in instances['Reservations']:\n for instance in reservations['Instances']:\n name = next((item for item in instance['Tags'] if item[\"Key\"] == \"Name\"),\n {'Key': 'Name', 'Value': 'None'})\n\n print(\"{:20s}{:15s}{:10s}{}\".format(instance['InstanceId'],\n instance['InstanceType'],\n instance['State']['Name'],\n name['Value']))\n\n print(str_sep)", "def get_instance_list():\n return parse_list_output(Popen('nova list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])", "def get_address(project, zone, instance):\n return gcloud(\n project,\n 'addresses',\n 'describe',\n '%s-ip' % instance,\n '--region=%s' % get_region(zone),\n '--format=value(address)',\n )", "def _get_available_regions():\n session = boto3.session.Session()\n\n return session.get_available_regions(service_name='s3')", "def _getAllRunningInstances(self):\n return self._ec2.get_only_instances(filters={\n 'tag:leader_instance_id': self._instanceId,\n 'instance-state-name': 'running'})", "def instances(self, alwaysIncludeEmail=None, maxAttendees=None,\r\n maxResults=None, originalStart=None, pageToken=None,\r\n showDeleted=None, timeZone=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/instances'.format(self.get_url())\r\n request = http.Request('GET', url, params)\r\n\r\n return request, parsers.parse_json", "def instances(self):\r\n # It would be more efficient to do this with 
filters now\r\n # but not all services that implement EC2 API support filters.\r\n instances = []\r\n rs = self.connection.get_all_instances()\r\n for reservation in rs:\r\n uses_group = [g.name for g in reservation.groups if g.name == self.name]\r\n if uses_group:\r\n instances.extend(reservation.instances)\r\n return instances", "def __get_reservations(self, instance_ids=None):\n if instance_ids:\n self.__validate_instance_id(instance_ids)\n euca_conn = self.__make_connection()\n try:\n return euca_conn.get_all_instances(instance_ids)\n except:\n euca.display_error_and_exit('%s' % ex)\n return False", "def gce_instance_filter(self) -> 'outputs.GceInstanceFilterResponse':\n return pulumi.get(self, \"gce_instance_filter\")", "def get_region_index(self, region, namespace, **filters):\n filters['namespace'] = namespace\n return self.get_resource('data/wow/region/index', region, **filters)", "def get_paginator(cls, instance, page=1, item_count=None, items_per_page=50,\n db_session=None,\n filter_params=None, **kwargs):\n if filter_params is None:\n filter_params = {}\n query = cls.get_latest_entries(instance, only_active=True, limit=None)\n return SqlalchemyOrmPage(query, page=page, item_count=item_count,\n items_per_page=items_per_page,\n **kwargs)", "def list_as_instances(access_key, secret_key, region, autoscaling_group):\n\n aws_as = init_aws_as_conn(access_key, secret_key, region)\n aws_ec2 = init_aws_ec2_conn(access_key, secret_key, region)\n autoscaling_instances = []\n\n vm = aws_as.get_all_groups([autoscaling_group])\n autoscaling_instances_id = [j.instance_id for i in vm for j in i.instances]\n\n for instance_id in autoscaling_instances_id:\n vm = boto.ec2.instance.Instance(aws_ec2)\n vm.id = instance_id\n vm.update()\n autoscaling_instances.append(vm)\n\n return autoscaling_instances", "def GetInstanceInfo(self, instance, static=None, reason=None):\n query = []\n if static is not None:\n query.append((\"static\", static))\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_GET,\n (\"/%s/instances/%s/info\" %\n (GANETI_RAPI_VERSION, instance)), query, None)", "def instances(self):\n from office365.outlook.calendar.events.collection import EventCollection\n return self.properties.get('instances',\n EventCollection(self.context, ResourcePath(\"instances\", self.resource_path)))", "def list(self, instance=None, limit=20, marker=0):\n if instance is None:\n return super(CloudDatabaseBackupManager, self).list()\n return self.api._manager._list_backups_for_instance(instance, limit=limit,\n marker=marker)", "def show_all_instances(self):\n if not self.all_instances:\n logging.error(\"%s: no instances detected\", self.name)\n return\n instances = \"\"\n for instance in self.all_instances:\n instances += \" - {0.name} (pid: {0.pid})\".format(instance)\n logging.info(\"arangod instances for starter: %s - %s\", self.name, instances)", "def get(self, region=None):\n base_url = self.base.base_url[region]\n url = '{}/lol/status/v3/shard-data'.format(base_url)\n r = requests.get(url, headers=self.base.headers)\n return r", "def get_images(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_images = conn.get_all_images(owners=['self'])\n except boto.exception.EC2ResponseError:\n return []\n return region_images", "def list_ebss_by_instance():\n\n ec2 = u.create_ec2_resource()\n instances = [(u.seconds_from_datetime(i.launch_time), i) for i in ec2.instances.all()]\n sorted_instances = sorted(instances, key=itemgetter(0))\n\n for (seconds, instance) 
in sorted_instances:\n\n volumes = instance.volumes.all()\n volume_strs = []\n for v in volumes:\n volume_strs.append(\"%s (%s)\"%(v.id, v.size))\n print(\"%s: %s\" % (u.get_name(instance.tags), ','.join(volume_strs)))", "def list(self, filters: dict = None, state: str = None, exclude: str = None) -> list:\n date_format = '%Y-%m-%d %H:%M:%S'\n self.instances = self.ec2.instances.all()\n\n # TOREMOVE\n def __all_instances():\n # all instances without filtering\n self.instances = [\n {\n 'InstanceId': instance.id,\n 'State': instance.state['Name'],\n 'Type': instance.instance_type,\n 'VpcId': instance.vpc_id,\n 'KeyName': instance.key_name,\n 'Tags': instance.tags,\n 'StartedAt': instance.launch_time.strftime(date_format)\n }\n for instance in self.instances\n ]\n\n if state:\n try:\n self.instances = self.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': [state]}])\n except IOError as e:\n raise EC2Error('Error listing instances by state {0} {1}'.format(state, e))\n\n if filters:\n # convert string into dict\n filters = literal_eval(filters)\n try:\n if not self.instances:\n self.instances = self.ec2.instances.all()\n\n self.instances = self.instances.filter(Filters=[{'Name': filters['Name'], 'Values': filters['Values']}])\n except IOError as e:\n raise EC2Error('Error listing instances with filters {0} {1}'.format(filters, e))\n\n if exclude:\n instances = []\n for i in self.instances:\n if i.id not in exclude:\n instances.append(i)\n return [\n {\n 'InstanceId': instance.id,\n 'State': instance.state['Name'],\n 'Type': instance.instance_type,\n 'VpcId': instance.vpc_id,\n 'KeyName': instance.key_name,\n 'Tags': instance.tags,\n 'StartedAt': instance.launch_time.strftime(date_format)\n }\n for instance in instances\n ]\n else:\n return [\n {\n 'InstanceId': instance.id,\n 'State': instance.state['Name'],\n 'Type': instance.instance_type,\n 'VpcId': instance.vpc_id,\n 'KeyName': instance.key_name,\n 'Tags': instance.tags,\n 'StartedAt': instance.launch_time.strftime(date_format)\n }\n for instance in self.instances\n ]", "def get(profile):\n client = boto3client.get(\"iam\", profile)\n return client.list_instance_profiles()", "def ls(region_name=DEFAULT_REGION):\n s3conn = s3.connect_to_region(region_name)\n buckets = s3conn.get_all_buckets()\n for bucket in buckets:\n print(bucket.name)", "def get_info(self, instance):\n shutdown_staues = ['deallocating', 'deallocated',\n 'stopping', 'stopped']\n instance_id = instance.uuid\n state = power_state.NOSTATE\n status = 'Unkown'\n try:\n vm = self.compute.virtual_machines.get(\n CONF.azure.resource_group, instance_id, expand='instanceView')\n # azure may raise msrestazure.azure_exceptions CloudError\n except exception.CloudError as e:\n msg = six.text_type(e)\n if 'ResourceNotFound' in msg:\n raise nova_ex.InstanceNotFound(instance_id=instance.uuid)\n else:\n LOG.exception(msg)\n ex = exception.InstanceGetFailure(reason=six.text_type(e),\n instance_uuid=instance_id)\n raise ex\n except Exception as e:\n msg = six.text_type(e)\n LOG.exception(msg)\n ex = exception.InstanceGetFailure(reason=six.text_type(e),\n instance_uuid=instance_id)\n raise ex\n else:\n LOG.debug('vm info is: {}'.format(vm))\n if vm and hasattr(vm, 'instance_view') and \\\n hasattr(vm.instance_view, 'statuses') and \\\n vm.instance_view.statuses is not None:\n for i in vm.instance_view.statuses:\n if hasattr(i, 'code') and \\\n i.code and 'PowerState' in i.code:\n status = i.code.split('/')[-1]\n if 'running' == status:\n state = power_state.RUNNING\n elif 
status in shutdown_staues:\n state = power_state.SHUTDOWN\n break\n LOG.info(_LI('vm: %(instance_id)s state is : %(status)s'),\n dict(instance_id=instance_id, status=status))\n return InstanceInfo(state=state, id=instance_id)", "def details(profile, instance_profile):\n client = boto3client.get(\"iam\", profile)\n params = {}\n params[\"InstanceProfileName\"] = instance_profile\n return client.get_instance_profile(**params)", "def region(cls):\n return cls.REGION", "def region(self):\n return self._get(\"region\")", "def do_instance_list(cs, args):\n instances = cs.instances.list()\n\n fields = [\"OCCI ID\"]\n if args.detailed:\n fields.extend([\"Name\", \"State\", \"Network\"])\n occi_attrs = (\"occi.compute.hostname\",\n \"occi.compute.state\")\n\n pt = prettytable.PrettyTable([f for f in fields], caching=False)\n pt.align = 'l'\n\n for instance in instances:\n row = []\n attrs = instance.get('attributes', {})\n instance_id = attrs.get('occi.core.id', None)\n row.append(instance_id)\n\n if args.detailed and instance_id:\n if not all([i in attrs for i in occi_attrs]):\n instance = cs.instances.detail(instance_id)\n attrs = instance.get('attributes', {})\n\n name = attrs.get(\"occi.core.title\", None)\n if name is None:\n name = attrs.get(\"occi.compute.hostname\", None)\n row.append(name)\n row.append(attrs.get(\"occi.compute.state\", None))\n\n links = instance.get(\"links\", [])\n network = []\n for link in links:\n if occi.CATEGORIES[\"network\"] in link[\"kind\"][\"related\"]:\n # get IPv4\n ip = link[\"attributes\"].get(\n \"occi.networkinterface.address\",\n None\n )\n if not ip:\n ip = link[\"attributes\"].get(\n \"occi.networkinterface.ip6\",\n None\n )\n network.append(ip)\n row.append(network)\n\n pt.add_row(row)\n\n print(pt.get_string())", "def list_elb(region, filter_by_kwargs):\n conn = boto.ec2.elb.connect_to_region(region)\n instances = conn.get_all_load_balancers()\n return lookup(instances, filter_by=filter_by_kwargs)", "def do_instance_show(cs, args):\n try:\n instance = cs.instances.detail(args.instance)\n except exceptions.NotFound as e:\n msg = \"No server with an id of '%s' exists\" % args.instance\n e.message = msg\n raise\n\n _print_server_details(instance)", "def getImages(region):\n creds = credentials()\n try:\n conn = ec2.connect_to_region(region, **creds)\n images = conn.get_all_images(owners=['self'])\n except boto.exception.EC2ResponseError:\n return []\n return images", "def describe_instances(self, instance_ids = None):\n response = instance.describe_instances(self.url, self.verb,\n self.headers, self.version, instance_ids)\n if response is not None :\n res = DescribeInstancesResponse.DescribeInstancesResponse()\n parseString(str(response.text), res)\n return res\n else :\n return None", "def GetInstances(self, bulk=False, reason=None):\n query = []\n _AppendIf(query, bulk, (\"bulk\", 1))\n _AppendReason(query, reason)\n\n instances = self._SendRequest(HTTP_GET,\n \"/%s/instances\" % GANETI_RAPI_VERSION,\n query, None)\n if bulk:\n return instances\n else:\n return [i[\"id\"] for i in instances]", "def get_instance_status(module, ecs, zone_id=None, page_number=None, page_size=None):\n changed = False\n try:\n result = ecs.get_instance_status(zone_id=zone_id, page_number=page_number, page_size=page_size)\n\n if 'error' in (''.join(str(result))).lower():\n module.fail_json(msg=result)\n changed = True\n\n except ECSResponseError as e:\n module.fail_json(msg='Unable to get status of instance(s), error: {0}'.format(e)) \n\n return changed, result", "def 
get_instances(instance_ids):\n\n instances = dict()\n conn = connect_to_region(REGION, aws_access_key_id=KEY_ID, aws_secret_access_key=ACCESS_KEY)\n try:\n reservations = conn.get_all_instances(instance_ids)\n except EC2ResponseError, ex:\n print 'Got exception when calling EC2 for instances (%s): %s' % \\\n (\", \".join(instance_ids), ex.error_message)\n return instances\n\n for r in reservations:\n if len(r.instances) and r.instances[0].id in instance_ids:\n instances[r.instances[0].id] = r.instances[0].tags[\"Name\"]\n\n return instances", "def instances(self):\n return self.get('instances')", "def list_instances(self):\n nodes = self._driver.list_nodes()\n return [[n.name, n.state, n.public_ips] for n in nodes]", "def list_notebook_instances(NextToken=None, MaxResults=None, SortBy=None, SortOrder=None, NameContains=None, CreationTimeBefore=None, CreationTimeAfter=None, LastModifiedTimeBefore=None, LastModifiedTimeAfter=None, StatusEquals=None, NotebookInstanceLifecycleConfigNameContains=None, DefaultCodeRepositoryContains=None, AdditionalCodeRepositoryEquals=None):\n pass", "def _get_instance_profile(role_name, iam_client=None, region_name=None):\n if not iam_client:\n iam_client = boto3.client('iam', region_name=region_name)\n try:\n resp = iam_client.get_instance_profile(\n InstanceProfileName=role_name)\n instance_profile_arn = resp['InstanceProfile']['Arn']\n except botocore.exceptions.ClientError as err:\n instance_profile_arn = None\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'NoSuchEntity':\n raise\n return instance_profile_arn", "def instances(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstanceStatusArgs']]]]:\n return pulumi.get(self, \"instances\")", "def _get_running_ec2_instances(theargs):\n mapstr = ''\n if theargs.profile is not None:\n boto3.setup_default_session(profile_name=theargs.profile)\n ec2 = boto3.client('ec2', region_name='us-west-2')\n\n response = ec2.describe_regions()\n for region in response['Regions']:\n rname = region['RegionName']\n sys.stdout.write('Running ec2 query in region: ' + rname + '\\n')\n ec2 = boto3.client('ec2', region_name=rname)\n mapstr += 'Region: ' + rname + '\\n'\n respy = ec2.describe_instances()\n for reso in respy['Reservations']:\n for entry in reso['Instances']:\n namey = ''\n try:\n for keyval in entry['Tags']:\n if keyval['Key'] == 'Name':\n namey = keyval['Value']\n break\n except KeyError:\n pass\n\n mapstr += ('\\t\\t' + entry['PublicDnsName'] + '\\n' +\n '\\t\\tLaunch Date: ' + str(entry['LaunchTime']) +\n '\\n' + \n '\\t\\tId: ' + entry['InstanceId'] + '\\n' +\n '\\t\\tType: ' + entry['InstanceType'] + '\\n' +\n '\\t\\tName: ' + namey + '\\n' +\n '\\t\\tState: ' + entry['State']['Name'] + '\\n\\n')\n sys.stdout.write('\\nResults:\\n\\n')\n return mapstr", "def instances(request): #pylint: disable=unused-argument\n response = list()\n # Retrieve filtered parameter on GET. 
It's used to display all instances or just user instances\n # In all cases, if user is not superuser, only user instances are displayed\n if 'filtered' in request.GET:\n filtered = ast.literal_eval(request.GET['filtered'])\n else:\n # By default, we filter based on user_id\n filtered = True\n # By default, we just list user instances, not all instances\n if not request.user.is_superuser:\n # If user is superuser and user are requesting admin view of instances\n # We ask a full list of instances\n filtered = True\n\n # We retrieve data from backend\n data_instances = BACKEND.instances(request, filtered)\n data_users = BACKEND.users()\n data_projects = BACKEND.projects()\n\n # We merge user and project information w/ instances\n for data_instance in data_instances:\n try:\n project = \"{name}\".format(\n **data_projects[data_instances[data_instance]['project_id']])\n except KeyError:\n project = data_instances[data_instance]['project_id']\n\n try:\n user = \"{name}\".format(\n **data_users[data_instances[data_instance]['user_id']])\n except KeyError:\n user = data_instances[data_instance]['user_id']\n response.append({\n 'id': data_instances[data_instance]['id'],\n 'name': data_instances[data_instance]['name'],\n 'created_at': data_instances[data_instance]['created_at'],\n 'lease_end': data_instances[data_instance]['lease_end'],\n 'project': project,\n 'user': user\n })\n return JsonResponse(response, safe=False)", "def get_availability_zones(self, context, filters=None, fields=None,\n sorts=None, limit=None, marker=None,\n page_reverse=False):", "def getregion(self, *args, **kwargs):\n return _image.image_getregion(self, *args, **kwargs)", "def GetInstance(\n self,\n instance_name: str,\n resource_group_name: Optional[str] = None) -> 'AZComputeVirtualMachine':\n instances = self.ListInstances(resource_group_name=resource_group_name)\n if instance_name not in instances:\n raise errors.ResourceNotFoundError(\n 'Instance {0:s} was not found in subscription {1:s}'.format(\n instance_name, self.az_account.subscription_id), __name__)\n return instances[instance_name]", "def get_ec2_reservations(profile, running_filter):\n try:\n ec2_client = boto3.Session(profile_name=profile).client('ec2')\n except ProfileNotFound:\n print(\"Profile: %s not found\" % profile, file=sys.stderr)\n sys.exit(1)\n filtered_instances = ec2_client.describe_instances(Filters=running_filter)\n return filtered_instances['Reservations']", "def List(self, zone):\n project = properties.VALUES.core.project.Get(required=True)\n request = self.messages.ComputeInstancesListRequest(\n zone=zone, project=project)\n instances = list_pager.YieldFromList(\n service=self.client.instances,\n request=request,\n method='List',\n field='items')\n\n result_set = []\n for instance in instances:\n if self._VMCreatedByExecGroup(instance):\n result_set.append(instance)\n\n return result_set" ]
[ "0.65359414", "0.64602095", "0.6369204", "0.6290484", "0.6248728", "0.5925351", "0.59036493", "0.564685", "0.5643462", "0.5618888", "0.5610908", "0.56100076", "0.55792296", "0.55599517", "0.5556868", "0.5544195", "0.55412877", "0.5532845", "0.5486521", "0.5459931", "0.5451159", "0.544322", "0.5440652", "0.54132307", "0.53653216", "0.5346481", "0.534229", "0.53411555", "0.5339474", "0.5339135", "0.53342795", "0.53285027", "0.5323785", "0.53142667", "0.53093743", "0.5290159", "0.5285819", "0.5267814", "0.5257835", "0.52556616", "0.5246127", "0.5244615", "0.5242059", "0.5233834", "0.5220303", "0.51756674", "0.5161309", "0.5160739", "0.51529986", "0.5126975", "0.5126699", "0.5113106", "0.510567", "0.5098133", "0.5090952", "0.5088127", "0.5083395", "0.5074973", "0.5065279", "0.5059674", "0.50435746", "0.5037295", "0.50339186", "0.5014508", "0.5009371", "0.5008984", "0.50083697", "0.4999327", "0.4989854", "0.49891996", "0.498695", "0.4980958", "0.4977251", "0.49634483", "0.4961415", "0.49455416", "0.494482", "0.4939827", "0.4932846", "0.49224705", "0.49208096", "0.49165648", "0.4915834", "0.49112868", "0.49100685", "0.4910008", "0.4906191", "0.4905006", "0.49010554", "0.48983964", "0.48955312", "0.48891973", "0.48798817", "0.4878187", "0.48771825", "0.48743075", "0.48736978", "0.48693976", "0.48624396", "0.4844482", "0.48442042" ]
0.0
-1
If you do not specify an instance when you call this operation, the overview information of all instances in the specified region within this account is returned. Paged queries are not supported by this operation.
async def describe_dbinstances_overview_async( self, request: dds_20151201_models.DescribeDBInstancesOverviewRequest, ) -> dds_20151201_models.DescribeDBInstancesOverviewResponse: runtime = util_models.RuntimeOptions() return await self.describe_dbinstances_overview_with_options_async(request, runtime)
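A minimal usage sketch for the async overview wrapper above, assuming an already-initialized client object; the region_id field, its value, and the response.body access are illustrative assumptions, while the request model name and the method signature are taken from the code itself.

import asyncio

async def print_instances_overview(client):
    # Leaving the instance unspecified returns the overview of all instances
    # in the region, as described above.
    request = dds_20151201_models.DescribeDBInstancesOverviewRequest(
        region_id='cn-hangzhou',  # assumed example region; field name is an assumption
    )
    response = await client.describe_dbinstances_overview_async(request)
    print(response.body)

# asyncio.run(print_instances_overview(client))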
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_instances(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_instances = []\n reservations = conn.get_all_reservations()\n for reservation in reservations:\n for instance in reservation.instances:\n region_instances.append(instance)\n except boto.exception.EC2ResponseError:\n return []\n return region_instances", "def list_instances():\n print(str_sep)\n print(\"Listing EC2 instances from [{}] region.\".format(SESSION.region_name))\n print(\"{:20s}{:15s}{:10s}{}\".format(\"ID\", \"TYPE\", \"STATE\", \"NAME\"))\n print(str_sep)\n\n try:\n for instance in EC2_MANAGER.list_instances():\n # get the instance name in the tags list\n name = next((item for item in instance.tags if item[\"Key\"] == \"Name\"),\n {'Key': 'Name', 'Value': 'None'})\n\n print(\"{:20s}{:15s}{:10s}{}\".format(instance.id,\n instance.instance_type,\n instance.state['Name'],\n name['Value']))\n except ClientError as e:\n ErrManager.err_manager(e)\n\n print(str_sep)", "def list_instances(self):\n print '# AWS EC2 instances'\n self.compute.list_instances()", "def search(self, q=\"\", version=\"\", instance=\"\", page=1):\n\n access_token = self._get_access_token()\n payload = {\n 'q': q,\n 'version': version,\n 'instance': instance,\n 'page[number]': page,\n 'page[size]': 20,\n }\n headers = {'Authorization': \"Bearer \"+access_token}\n response = requests.get(self.api_endpoint, params=payload, headers=headers)\n\n if response.status_code == 401:\n raise AuthError('not authorized')\n elif response.status_code == 404:\n raise NotFoundError('entry not found')\n elif response.status_code != 200:\n raise Error('HTTP error')\n\n try:\n obj = response.json()\n except (ValueError, KeyError):\n raise Error(\"couldn't decode json\")\n\n return obj", "def listInstancesRegionZone(region,zone):\n\tprint \"-\"*80\n\tprint \"# Region :\",region,\" Zone\", zone\t\n\tprint \"-\"*80\n\tinstances = getInstancesRegionZone(region,zone)\n\tif instances:\n\t\tfor instance in instances:\n\t\t\tprint \"[\",instance.ami_launch_index,\"]\",instance.ip_address,\" (\",instance.private_ip_address,\") \",instance.instance_type,\" key=\",instance.key_name", "def describe_instances():\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Describe instances\n instances = ec2_resource.instances.all()\n for instance in instances:\n print('State of the instance \"' + instance.id + '\" is: \"' + instance.state['Name'] + '\"')\n return", "def _get_instances(instance_tags, region):\n return ec2_conn[region].get_all_instances(filters={\"tag:Name\": instance_tags})", "def list_ec2(region, filter_by_kwargs):\n conn = boto.ec2.connect_to_region(region)\n instances = conn.get_only_instances()\n return lookup(instances, filter_by=filter_by_kwargs)", "def list_instances(self):\n\n response = self.client.service.instances().aggregatedList(\n project=self.client.project_id).execute()\n\n zones = response.get('items', {})\n instances = []\n for zone in zones.values():\n for instance in zone.get('instances', []):\n instances.append(instance)\n\n return instances", "def list_instances():\n if request.method == \"GET\":\n return render_template(\"instances.html\")", "def to_representation(self, instance):\n\n return instance.region", "def list_instances(self):\n instances = []\n try:\n pages = self.compute.virtual_machines.list(\n CONF.azure.resource_group)\n except Exception as e:\n msg = six.text_type(e)\n LOG.exception(msg)\n ex = 
exception.InstanceListFailure(reason=six.text_type(e))\n raise ex\n else:\n if pages:\n for i in pages:\n instances.append(i.name)\n return instances", "def get_all_vpc_instances ( ec2_conn, vpc ) :\n return ec2_conn.get_only_instances( filters = { \"vpc-id\" : vpc.id } )", "def show_instances():\n return get_instances()", "def list_aws_instances(verbose=False, state='all'):\n conn = get_ec2_connection()\n\n reservations = conn.get_all_reservations()\n instances = []\n for res in reservations:\n for instance in res.instances:\n if state == 'all' or instance.state == state:\n instance = {\n 'id': instance.id,\n 'type': instance.instance_type,\n 'image': instance.image_id,\n 'state': instance.state,\n 'instance': instance,\n }\n instances.append(instance)\n env.instances = instances\n if verbose:\n import pprint\n pprint.pprint(env.instances)", "def yield_instances(self, instance_filter=None):\n if instance_filter and set(\"\\\"\\\\'\").intersection(instance_filter):\n raise ValueError('Invalid instance filter: %s' % instance_filter)\n page_token = None\n while True:\n params = {'maxResults': 250}\n if instance_filter:\n params['filter'] = 'name eq \"%s\"' % instance_filter\n if page_token:\n params['pageToken'] = page_token\n resp = self.call_api('/aggregated/instances', params=params, deadline=120)\n items = resp.get('items', {})\n for zone in sorted(items):\n for instance in items[zone].get('instances', []):\n yield instance\n page_token = resp.get('nextPageToken')\n if not page_token:\n break", "def list_running_instances(self):\n print '# Running AWS EC2 instances'\n self.compute.list_running_instances()", "def instances(self, endpoint_name=None):\n if endpoint_name is None:\n _, body = self.request('/v1.1/endpoint/instances', 'GET')\n else:\n _, body = self.request('/v1.1/endpoints/%s/instances' % endpoint_name, 'GET')\n return body", "def get_instances_in_instance_group(\n self, name, zone, max_results=None, page_token=None):\n params = {}\n if max_results:\n params['maxResults'] = max_results\n if page_token:\n params['pageToken'] = page_token\n return self.call_api(\n '/zones/%s/instanceGroups/%s/listInstances' % (zone, name),\n method='POST',\n params=params,\n )", "def instances(self, **query):\n return self._list(_instance.Instance, **query)", "def list_instances(self):\n # list instances\n self._list_instances()", "def yield_instances_in_zone(self, zone, instance_filter=None):\n if instance_filter and set(\"\\\"\\\\'\").intersection(instance_filter):\n raise ValueError('Invalid instance filter: %s' % instance_filter)\n page_token = None\n while True:\n params = {'maxResults': 250}\n if instance_filter:\n params['filter'] = 'name eq \"%s\"' % instance_filter\n if page_token:\n params['pageToken'] = page_token\n try:\n resp = self.call_api(\n '/zones/%s/instances' % zone, params=params, deadline=120)\n except net.Error as exc:\n if not page_token and exc.status_code == 400:\n return # no such zone, this is fine...\n raise\n for instance in resp.get('items', []):\n yield instance\n page_token = resp.get('nextPageToken')\n if not page_token:\n break", "def instances(self):\n if \"instances\" in self._prop_dict:\n return InstancesCollectionPage(self._prop_dict[\"instances\"])\n else:\n return None", "def list(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n print(vmrun.listSnapshots())", "def list_(args):\n\n # Get Config.py\n cloud = 
get_current_cloud(args.cloud)\n\n instances = cloud.list_instances()\n print_table(print_instance_summary, headers, instances,\n use_color=args.color)\n return instances", "def describe_instance(instance_id):\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n try:\n # Describe an instance\n instance = ec2_resource.Instance(instance_id)\n print('\\nInstance Id: ' + instance_id)\n print('Instance Id: ' + instance.id)\n print('Image Id: ' + instance.image_id)\n print('Instance Type: ' + instance.instance_type)\n print('State: ' + instance.state['Name'])\n if instance.state['Name'] == 'running':\n print('Private DNS Name: ' + instance.private_dns_name)\n print('Private IP: ' + instance.private_ip_address)\n print('Public DNS Name: ' + instance.public_dns_name)\n print('Public IP: ' + instance.public_ip_address)\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"MissingParameter\":\n print(\"Error: Missing instance id!!\")\n else:\n raise\n return", "def aws_get_instances_by_name(region, name, raw=True):\n return _aws_get_instance_by_tag(region, name, 'tag:Name', raw)", "def list_instances_detail(self):\n\n # TODO(imsplitbit): need to ask around if this is the best way to do\n # this. This causes some redundant vzlist commands as get_info is run\n # on every item returned from this command but it didn't make sense\n # to re-implement get_info as get_info_all.\n infos = []\n try:\n # get a list of CT names which will be nova friendly.\n # NOTE: This can be an issue if nova decides to change\n # the format of names. We would need to have a migration process\n # to change the names in the name field of the CTs.\n out, err = utils.execute('sudo', 'vzlist', '--all', '-o',\n 'name', '-H')\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Problem listing Vzs')\n\n for name in out.splitlines():\n name = name.split()[0]\n status = self.get_info(name)\n infos.append(driver.InstanceInfo(name, status['state']))\n\n return infos", "def load_instances_page(self):\n logging.info(\"loading instances page {}\".format(self.horizon_instances_url))\n\n return self._load_page_measure_time(self.driver, self.horizon_instances_url,\n tag = \"Instances Page\")", "def show_instances(cls, args, config):\n instance_list = config.get_all_instances()\n if len(instance_list) > 0:\n table_data = []\n for i in instance_list:\n provider_obj = config.get_object_by_id(i.provider_id, 'Provider')\n if provider_obj is None:\n continue\n provider_name = provider_obj.name\n #print \"provider_obj.type\",provider_obj.type\n if i.worker_group_id is not None:\n name = config.get_object_by_id(i.worker_id, 'WorkerGroup').name\n itype = 'worker'\n else:\n name = config.get_object_by_id(i.controller_id, 'Controller').name\n itype = 'controller'\n table_data.append([i.id, provider_name, i.provider_instance_identifier, itype, name])\n table_print(['ID', 'provider', 'instance id', 'type', 'name'], table_data)\n else:\n print \"No instance found\"", "def describe_rds_instances(rds, account, region, output_bucket):\n rds_list = rds.describe_db_instances().get('DBInstances')\n\n for rds_obj in rds_list:\n #print rds_obj\n output_bucket.append(misc.format_line((\n misc.check_if(account.get('name')),\n misc.check_if(region.get('RegionName')),\n misc.check_if(rds_obj.get('DBSubnetGroup').get('VpcId')),\n misc.check_if(rds_obj.get('DBInstanceIdentifier')),\n misc.check_if(rds_obj.get('DBInstanceClass')),\n 
misc.check_if(str(rds_obj.get('PubliclyAccessible'))),\n misc.check_if(rds_obj.get('Endpoint').get('Address')),\n misc.lookup(rds_obj.get('Endpoint').get('Address')),\n misc.check_if(str(rds_obj.get('Endpoint').get('Port')))\n )))", "def list_instances():\n js = _get_jetstream_conn()\n il = js.compute.instances.list()\n if not il:\n msg = \"You don't have any instances available.\"\n else:\n msg = (\"You have {0} instances available. Here are up to 3 most \"\n \"recent: \".format(len(il)))\n msg_ex = \"\"\n content = \"\"\n for i in il[:3]:\n msg_ex += \"{0},\".format(i.name)\n content += \"{0} ({1})\\n\".format(\n i.name, i.public_ips[0] if i.public_ips else i.private_ips[0])\n return statement(msg + msg_ex).simple_card(title=msg, content=content)", "def name(self):\n return self._client.project_name + '/instances/' + self.instance_id", "def GetInstance(self, instance, reason=None):\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_GET,\n (\"/%s/instances/%s\" %\n (GANETI_RAPI_VERSION, instance)), query, None)", "def Get(self, instance_name, zone):\n project = properties.VALUES.core.project.Get(required=True)\n request = self.messages.ComputeInstancesGetRequest(\n zone=zone, project=project, instance=instance_name)\n instance = self.client.instances.Get(request)\n if self._VMCreatedByExecGroup(instance):\n return instance\n raise HttpNotFoundError(\n 'Instance:{} not found'.format(instance_name), None, None)", "def get_all_reservations(config):\n reservations = []\n region_list = regions(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for region in region_list:\n _logger.info(\"Searching %s\", region)\n cnx = region.connect(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for reservation in cnx.get_all_instances():\n _logger.info(\"Found %s %s\", reservation,\n [str(i.id) for i in reservation.instances])\n reservations.append(reservation)\n return reservations", "def aws_get_instances_by_id(region, instance_id, raw=True):\n client = boto3.session.Session().client('ec2', region)\n try:\n matching_reservations = client.describe_instances(InstanceIds=[instance_id]).get('Reservations', [])\n except ClientError as exc:\n if exc.response.get('Error', {}).get('Code') != 'InvalidInstanceID.NotFound':\n raise\n return []\n instances = []\n [[instances.append(_aws_instance_from_dict(region, instance, raw)) # pylint: disable=expression-not-assigned\n for instance in reservation.get('Instances')] for reservation in matching_reservations if reservation]\n return instances", "def query_region(self, region_name=None, region_id=None):\n\n if region_name is not None and region_id is None:\n query = \"select * from region where region_name='{}'\".format(\n region_name)\n try:\n self.dbCursor.execute(query)\n return self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)\n elif region_name is None and region_id is not None:\n query = \"select * from region where region_id='{}'\".format(\n region_id)\n try:\n self.dbCursor.execute(query)\n return self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)", "def running_instances(self, args):\n message = MessageClass()\n region = args[\"Region\"]\n\n # Boto3 resource creation by providing the access_id and access_secret\n ec2 = boto3.resource(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n 
aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n instances = ec2.instances.filter(\n Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])\n for instance in instances:\n attachment = MessageAttachmentsClass()\n attachment.title = instance.id\n message.attach(attachment)\n\n button = MessageButtonsClass()\n button.text = \"Stop Instance\"\n button.value = \"Stop Instance\"\n button.name = \"Stop Instance\"\n button.command = {\"service_application\": self.yellowant_integration_id,\n \"function_name\": \"stop-instance\",\n \"data\": {\"Instance-ID\": instance.id, \"Region\": region}}\n attachment.attach_button(button)\n\n message.message_text = \"Instances Running are:\"\n return message.to_json()", "def api_get_regions():\n db_session = DBSession()\n\n rows = []\n criteria = '%'\n if request.args and request.args.get('q'):\n criteria += request.args.get('q') + '%'\n else:\n criteria += '%'\n\n regions = db_session.query(Region).filter(Region.name.like(criteria)).order_by(Region.name.asc()).all()\n if len(regions) > 0:\n if request.args.get('show_all'):\n rows.append({'id': 0, 'text': 'ALL'})\n for region in regions:\n rows.append({'id': region.id, 'text': region.name})\n\n return jsonify(**{'data': rows})", "def show_instance(name, session=None, call=None):\n if call == \"function\":\n raise SaltCloudException(\n \"The show_instnce function must be called with -a or --action.\"\n )\n log.debug(\"show_instance-> name: %s session: %s\", name, session)\n if session is None:\n session = _get_session()\n vm = _get_vm(name, session=session)\n record = session.xenapi.VM.get_record(vm)\n if not record[\"is_a_template\"] and not record[\"is_control_domain\"]:\n try:\n base_template_name = record[\"other_config\"][\"base_template_name\"]\n except Exception: # pylint: disable=broad-except\n base_template_name = None\n log.debug(\n \"VM %s, does not have base_template_name attribute\",\n record[\"name_label\"],\n )\n ret = {\n \"id\": record[\"uuid\"],\n \"image\": base_template_name,\n \"name\": record[\"name_label\"],\n \"size\": record[\"memory_dynamic_max\"],\n \"state\": record[\"power_state\"],\n \"private_ips\": get_vm_ip(name, session),\n \"public_ips\": None,\n }\n\n __utils__[\"cloud.cache_node\"](ret, _get_active_provider_name(), __opts__)\n return ret", "def nfvi_get_instances(paging, callback, context=None):\n cmd_id = _compute_plugin.invoke_plugin('get_instances', paging, context,\n callback=callback)\n return cmd_id", "def main(self, _):\n all_addresses = find_addresses.probe_regions()\n\n print(\"\")\n if not all_addresses:\n print(\"No namespace elastic IP addresses found.\")\n\n for region in consts.REGIONS:\n region_addresses = [address for address in all_addresses\n if address['region'] == region]\n if not region_addresses:\n continue\n\n print(f\"{region}: {len(region_addresses)} address(es) found:\")\n for address in region_addresses:\n if 'instance_name' in address:\n print(f\" {address['ip']} ({address['instance_name']})\")\n elif 'association_id' in address:\n print(f\" {address['ip']} (unknown association)\")\n else:\n print(f\" {address['ip']} (not associated)\")", "def list_addresses(self, region):\n assert is_valid_region(region), region\n page_token = None\n while True:\n params = {'maxResults': 250}\n if page_token:\n params['pageToken'] = page_token\n resp = self.call_api(\n '/regions/%s/addresses' % region, params=params, deadline=120)\n for addr in resp.get('items', []):\n yield addr\n page_token = 
resp.get('nextPageToken')\n if not page_token:\n break", "def terminate_instance(self):\n # connect to ec2\n try:\n ec2_region = [r for r in boto.ec2.regions() if r.name == self._region][0]\n except indexerror:\n print >> sys.stderr, 'unknown region: %s' % self._region\n exit(2)\n ec2_connection = ec2_region.connect()\n\n #import code; code.interact(local=locals())\n instances = reduce(list.__add__, [reservation.instances for reservation in ec2_connection.get_all_instances()])\n name_matches = [i for i in instances\n if i.tags.get('Name', None) == self._instance_name and i.state == 'running']\n\n if (not name_matches):\n raise ValueError('No instance found with name %s' % self._instance_name)\n elif len(name_matches) > 1:\n raise ValueError('Multiple instances found with name %s' % self._instance_name)\n\n instance = name_matches[0]\n\n ec2_connection.terminate_instances(instance_ids=[instance.id])", "def show_instance(name, call=None):\n if call != \"action\":\n raise SaltCloudSystemExit(\n \"The show_instance action must be called with -a or --action.\"\n )\n\n nodes = list_nodes_full()\n __utils__[\"cloud.cache_node\"](nodes[name], _get_active_provider_name(), __opts__)\n return nodes[name]", "def get_snapshots(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_snapshots = conn.get_all_snapshots(owner='self')\n except boto.exception.EC2ResponseError:\n return []\n return region_snapshots", "def find_instances(\n instance_id=None,\n name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n return_objs=False,\n in_states=None,\n filters=None,\n):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n try:\n filter_parameters = {\"filters\": {}}\n\n if instance_id:\n filter_parameters[\"instance_ids\"] = [instance_id]\n\n if name:\n filter_parameters[\"filters\"][\"tag:Name\"] = name\n\n if tags:\n for tag_name, tag_value in tags.items():\n filter_parameters[\"filters\"][\"tag:{}\".format(tag_name)] = tag_value\n\n if filters:\n filter_parameters[\"filters\"].update(filters)\n\n reservations = conn.get_all_reservations(**filter_parameters)\n instances = [i for r in reservations for i in r.instances]\n log.debug(\n \"The filters criteria %s matched the following instances:%s\",\n filter_parameters,\n instances,\n )\n\n if in_states:\n instances = [i for i in instances if i.state in in_states]\n log.debug(\n \"Limiting instance matches to those in the requested states: %s\",\n instances,\n )\n if instances:\n if return_objs:\n return instances\n return [instance.id for instance in instances]\n else:\n return []\n except boto.exception.BotoServerError as exc:\n log.error(exc)\n return []", "def detail_running_instance(self):\n\n instance_id = self._choose_among_running_instances()\n\n # Exit option\n if not instance_id:\n print 'Operation cancelled'\n return\n\n # Print the details\n print '# Details of the \"%s\" instance' % instance_id\n self.compute.detail_running_instance(instance_id)", "def _aws_get_instance_by_tag(region, name, tag, raw):\n client = boto3.session.Session().client('ec2', region)\n matching_reservations = client.describe_instances(Filters=[{'Name': tag, 'Values': [name]}]).get('Reservations', [])\n instances = []\n [[instances.append(_aws_instance_from_dict(region, instance, raw)) # pylint: disable=expression-not-assigned\n for instance in reservation.get('Instances')] for reservation in matching_reservations if reservation]\n return instances", "def instance_view(self) -> 
'outputs.DedicatedHostInstanceViewResponse':\n return pulumi.get(self, \"instance_view\")", "def get_instances() -> dict:\n url = f\"{app.config['COMPUTE_SERVERS_REF']}/detail\"\n instances_rq = request(\n method=\"GET\", url=url, headers=build_header(), params={\"vm_state\": \"active\"},\n )\n\n if not instances_rq.ok:\n HTTPError(instances_rq.status_code)\n\n answer = {\"servers\": list()}\n for instance in instances_rq.json()[\"servers\"]:\n instance_info = dict(name=instance[\"name\"])\n instance_info[\"ip_addresses\"] = list()\n for network, info in instance[\"addresses\"].items():\n instance_info[\"ip_addresses\"].extend(entry[\"addr\"] for entry in info)\n answer[\"servers\"].append(instance_info)\n\n return answer", "def test_aws_service_api_regions_get(self):\n pass", "def get_all_instances(self, instance_ids=None, filters=None):\r\n params = {}\r\n if instance_ids:\r\n self.build_list_params(params, instance_ids, 'InstanceId')\r\n if filters:\r\n if 'group-id' in filters:\r\n warnings.warn(\"The group-id filter now requires a security \"\r\n \"group identifier (sg-*) instead of a group \"\r\n \"name. To filter by group name use the \"\r\n \"'group-name' filter instead.\", UserWarning)\r\n self.build_filter_params(params, filters)\r\n return self.get_list('DescribeInstances', params,\r\n [('item', Reservation)], verb='POST')", "def get_ec2_instances(client):\n reservations = client.describe_instances().get(\"Reservations\")\n instances = list(map(lambda x: x.get(\"Instances\"), reservations))\n instances = list(itertools.chain.from_iterable(instances))\n return list(map(lambda x: {\n 'name': next((t['Value'] for t in x.get('Tags', []) if t.get('Key') == 'Name'), 'Unknown'),\n 'id': x.get('InstanceId'),\n 'state': x.get('State'),\n }, instances))", "def list_instances_by_tag(tag_key, tag_value):\n instances = EC2_MANAGER.list_instances_by_tag(tag_key, tag_value)\n\n print(str_sep)\n print(\"Listing EC2 instances from [{}] region with tag [{}:{}].\"\n .format(SESSION.region_name, tag_key, tag_value))\n print(\"{:20s}{:15s}{:10s}{}\".format(\"ID\", \"TYPE\", \"STATE\", \"NAME\"))\n print(str_sep)\n\n for reservations in instances['Reservations']:\n for instance in reservations['Instances']:\n name = next((item for item in instance['Tags'] if item[\"Key\"] == \"Name\"),\n {'Key': 'Name', 'Value': 'None'})\n\n print(\"{:20s}{:15s}{:10s}{}\".format(instance['InstanceId'],\n instance['InstanceType'],\n instance['State']['Name'],\n name['Value']))\n\n print(str_sep)", "def get_instance_list():\n return parse_list_output(Popen('nova list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])", "def get_address(project, zone, instance):\n return gcloud(\n project,\n 'addresses',\n 'describe',\n '%s-ip' % instance,\n '--region=%s' % get_region(zone),\n '--format=value(address)',\n )", "def _get_available_regions():\n session = boto3.session.Session()\n\n return session.get_available_regions(service_name='s3')", "def _getAllRunningInstances(self):\n return self._ec2.get_only_instances(filters={\n 'tag:leader_instance_id': self._instanceId,\n 'instance-state-name': 'running'})", "def instances(self, alwaysIncludeEmail=None, maxAttendees=None,\r\n maxResults=None, originalStart=None, pageToken=None,\r\n showDeleted=None, timeZone=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/instances'.format(self.get_url())\r\n request = http.Request('GET', url, params)\r\n\r\n return request, parsers.parse_json", "def instances(self):\r\n # It would be more efficient to 
do this with filters now\r\n # but not all services that implement EC2 API support filters.\r\n instances = []\r\n rs = self.connection.get_all_instances()\r\n for reservation in rs:\r\n uses_group = [g.name for g in reservation.groups if g.name == self.name]\r\n if uses_group:\r\n instances.extend(reservation.instances)\r\n return instances", "def __get_reservations(self, instance_ids=None):\n if instance_ids:\n self.__validate_instance_id(instance_ids)\n euca_conn = self.__make_connection()\n try:\n return euca_conn.get_all_instances(instance_ids)\n except:\n euca.display_error_and_exit('%s' % ex)\n return False", "def gce_instance_filter(self) -> 'outputs.GceInstanceFilterResponse':\n return pulumi.get(self, \"gce_instance_filter\")", "def get_paginator(cls, instance, page=1, item_count=None, items_per_page=50,\n db_session=None,\n filter_params=None, **kwargs):\n if filter_params is None:\n filter_params = {}\n query = cls.get_latest_entries(instance, only_active=True, limit=None)\n return SqlalchemyOrmPage(query, page=page, item_count=item_count,\n items_per_page=items_per_page,\n **kwargs)", "def get_region_index(self, region, namespace, **filters):\n filters['namespace'] = namespace\n return self.get_resource('data/wow/region/index', region, **filters)", "def list_as_instances(access_key, secret_key, region, autoscaling_group):\n\n aws_as = init_aws_as_conn(access_key, secret_key, region)\n aws_ec2 = init_aws_ec2_conn(access_key, secret_key, region)\n autoscaling_instances = []\n\n vm = aws_as.get_all_groups([autoscaling_group])\n autoscaling_instances_id = [j.instance_id for i in vm for j in i.instances]\n\n for instance_id in autoscaling_instances_id:\n vm = boto.ec2.instance.Instance(aws_ec2)\n vm.id = instance_id\n vm.update()\n autoscaling_instances.append(vm)\n\n return autoscaling_instances", "def GetInstanceInfo(self, instance, static=None, reason=None):\n query = []\n if static is not None:\n query.append((\"static\", static))\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_GET,\n (\"/%s/instances/%s/info\" %\n (GANETI_RAPI_VERSION, instance)), query, None)", "def list(self, instance=None, limit=20, marker=0):\n if instance is None:\n return super(CloudDatabaseBackupManager, self).list()\n return self.api._manager._list_backups_for_instance(instance, limit=limit,\n marker=marker)", "def instances(self):\n from office365.outlook.calendar.events.collection import EventCollection\n return self.properties.get('instances',\n EventCollection(self.context, ResourcePath(\"instances\", self.resource_path)))", "def show_all_instances(self):\n if not self.all_instances:\n logging.error(\"%s: no instances detected\", self.name)\n return\n instances = \"\"\n for instance in self.all_instances:\n instances += \" - {0.name} (pid: {0.pid})\".format(instance)\n logging.info(\"arangod instances for starter: %s - %s\", self.name, instances)", "def get(self, region=None):\n base_url = self.base.base_url[region]\n url = '{}/lol/status/v3/shard-data'.format(base_url)\n r = requests.get(url, headers=self.base.headers)\n return r", "def get_images(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_images = conn.get_all_images(owners=['self'])\n except boto.exception.EC2ResponseError:\n return []\n return region_images", "def list_ebss_by_instance():\n\n ec2 = u.create_ec2_resource()\n instances = [(u.seconds_from_datetime(i.launch_time), i) for i in ec2.instances.all()]\n sorted_instances = sorted(instances, key=itemgetter(0))\n\n for 
(seconds, instance) in sorted_instances:\n\n volumes = instance.volumes.all()\n volume_strs = []\n for v in volumes:\n volume_strs.append(\"%s (%s)\"%(v.id, v.size))\n print(\"%s: %s\" % (u.get_name(instance.tags), ','.join(volume_strs)))", "def list(self, filters: dict = None, state: str = None, exclude: str = None) -> list:\n date_format = '%Y-%m-%d %H:%M:%S'\n self.instances = self.ec2.instances.all()\n\n # TOREMOVE\n def __all_instances():\n # all instances without filtering\n self.instances = [\n {\n 'InstanceId': instance.id,\n 'State': instance.state['Name'],\n 'Type': instance.instance_type,\n 'VpcId': instance.vpc_id,\n 'KeyName': instance.key_name,\n 'Tags': instance.tags,\n 'StartedAt': instance.launch_time.strftime(date_format)\n }\n for instance in self.instances\n ]\n\n if state:\n try:\n self.instances = self.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': [state]}])\n except IOError as e:\n raise EC2Error('Error listing instances by state {0} {1}'.format(state, e))\n\n if filters:\n # convert string into dict\n filters = literal_eval(filters)\n try:\n if not self.instances:\n self.instances = self.ec2.instances.all()\n\n self.instances = self.instances.filter(Filters=[{'Name': filters['Name'], 'Values': filters['Values']}])\n except IOError as e:\n raise EC2Error('Error listing instances with filters {0} {1}'.format(filters, e))\n\n if exclude:\n instances = []\n for i in self.instances:\n if i.id not in exclude:\n instances.append(i)\n return [\n {\n 'InstanceId': instance.id,\n 'State': instance.state['Name'],\n 'Type': instance.instance_type,\n 'VpcId': instance.vpc_id,\n 'KeyName': instance.key_name,\n 'Tags': instance.tags,\n 'StartedAt': instance.launch_time.strftime(date_format)\n }\n for instance in instances\n ]\n else:\n return [\n {\n 'InstanceId': instance.id,\n 'State': instance.state['Name'],\n 'Type': instance.instance_type,\n 'VpcId': instance.vpc_id,\n 'KeyName': instance.key_name,\n 'Tags': instance.tags,\n 'StartedAt': instance.launch_time.strftime(date_format)\n }\n for instance in self.instances\n ]", "def get(profile):\n client = boto3client.get(\"iam\", profile)\n return client.list_instance_profiles()", "def ls(region_name=DEFAULT_REGION):\n s3conn = s3.connect_to_region(region_name)\n buckets = s3conn.get_all_buckets()\n for bucket in buckets:\n print(bucket.name)", "def get_info(self, instance):\n shutdown_staues = ['deallocating', 'deallocated',\n 'stopping', 'stopped']\n instance_id = instance.uuid\n state = power_state.NOSTATE\n status = 'Unkown'\n try:\n vm = self.compute.virtual_machines.get(\n CONF.azure.resource_group, instance_id, expand='instanceView')\n # azure may raise msrestazure.azure_exceptions CloudError\n except exception.CloudError as e:\n msg = six.text_type(e)\n if 'ResourceNotFound' in msg:\n raise nova_ex.InstanceNotFound(instance_id=instance.uuid)\n else:\n LOG.exception(msg)\n ex = exception.InstanceGetFailure(reason=six.text_type(e),\n instance_uuid=instance_id)\n raise ex\n except Exception as e:\n msg = six.text_type(e)\n LOG.exception(msg)\n ex = exception.InstanceGetFailure(reason=six.text_type(e),\n instance_uuid=instance_id)\n raise ex\n else:\n LOG.debug('vm info is: {}'.format(vm))\n if vm and hasattr(vm, 'instance_view') and \\\n hasattr(vm.instance_view, 'statuses') and \\\n vm.instance_view.statuses is not None:\n for i in vm.instance_view.statuses:\n if hasattr(i, 'code') and \\\n i.code and 'PowerState' in i.code:\n status = i.code.split('/')[-1]\n if 'running' == status:\n state = 
power_state.RUNNING\n elif status in shutdown_staues:\n state = power_state.SHUTDOWN\n break\n LOG.info(_LI('vm: %(instance_id)s state is : %(status)s'),\n dict(instance_id=instance_id, status=status))\n return InstanceInfo(state=state, id=instance_id)", "def details(profile, instance_profile):\n client = boto3client.get(\"iam\", profile)\n params = {}\n params[\"InstanceProfileName\"] = instance_profile\n return client.get_instance_profile(**params)", "def region(cls):\n return cls.REGION", "def region(self):\n return self._get(\"region\")", "def list_elb(region, filter_by_kwargs):\n conn = boto.ec2.elb.connect_to_region(region)\n instances = conn.get_all_load_balancers()\n return lookup(instances, filter_by=filter_by_kwargs)", "def do_instance_list(cs, args):\n instances = cs.instances.list()\n\n fields = [\"OCCI ID\"]\n if args.detailed:\n fields.extend([\"Name\", \"State\", \"Network\"])\n occi_attrs = (\"occi.compute.hostname\",\n \"occi.compute.state\")\n\n pt = prettytable.PrettyTable([f for f in fields], caching=False)\n pt.align = 'l'\n\n for instance in instances:\n row = []\n attrs = instance.get('attributes', {})\n instance_id = attrs.get('occi.core.id', None)\n row.append(instance_id)\n\n if args.detailed and instance_id:\n if not all([i in attrs for i in occi_attrs]):\n instance = cs.instances.detail(instance_id)\n attrs = instance.get('attributes', {})\n\n name = attrs.get(\"occi.core.title\", None)\n if name is None:\n name = attrs.get(\"occi.compute.hostname\", None)\n row.append(name)\n row.append(attrs.get(\"occi.compute.state\", None))\n\n links = instance.get(\"links\", [])\n network = []\n for link in links:\n if occi.CATEGORIES[\"network\"] in link[\"kind\"][\"related\"]:\n # get IPv4\n ip = link[\"attributes\"].get(\n \"occi.networkinterface.address\",\n None\n )\n if not ip:\n ip = link[\"attributes\"].get(\n \"occi.networkinterface.ip6\",\n None\n )\n network.append(ip)\n row.append(network)\n\n pt.add_row(row)\n\n print(pt.get_string())", "def do_instance_show(cs, args):\n try:\n instance = cs.instances.detail(args.instance)\n except exceptions.NotFound as e:\n msg = \"No server with an id of '%s' exists\" % args.instance\n e.message = msg\n raise\n\n _print_server_details(instance)", "def getImages(region):\n creds = credentials()\n try:\n conn = ec2.connect_to_region(region, **creds)\n images = conn.get_all_images(owners=['self'])\n except boto.exception.EC2ResponseError:\n return []\n return images", "def describe_instances(self, instance_ids = None):\n response = instance.describe_instances(self.url, self.verb,\n self.headers, self.version, instance_ids)\n if response is not None :\n res = DescribeInstancesResponse.DescribeInstancesResponse()\n parseString(str(response.text), res)\n return res\n else :\n return None", "def GetInstances(self, bulk=False, reason=None):\n query = []\n _AppendIf(query, bulk, (\"bulk\", 1))\n _AppendReason(query, reason)\n\n instances = self._SendRequest(HTTP_GET,\n \"/%s/instances\" % GANETI_RAPI_VERSION,\n query, None)\n if bulk:\n return instances\n else:\n return [i[\"id\"] for i in instances]", "def get_instance_status(module, ecs, zone_id=None, page_number=None, page_size=None):\n changed = False\n try:\n result = ecs.get_instance_status(zone_id=zone_id, page_number=page_number, page_size=page_size)\n\n if 'error' in (''.join(str(result))).lower():\n module.fail_json(msg=result)\n changed = True\n\n except ECSResponseError as e:\n module.fail_json(msg='Unable to get status of instance(s), error: {0}'.format(e)) \n\n return 
changed, result", "def get_instances(instance_ids):\n\n instances = dict()\n conn = connect_to_region(REGION, aws_access_key_id=KEY_ID, aws_secret_access_key=ACCESS_KEY)\n try:\n reservations = conn.get_all_instances(instance_ids)\n except EC2ResponseError, ex:\n print 'Got exception when calling EC2 for instances (%s): %s' % \\\n (\", \".join(instance_ids), ex.error_message)\n return instances\n\n for r in reservations:\n if len(r.instances) and r.instances[0].id in instance_ids:\n instances[r.instances[0].id] = r.instances[0].tags[\"Name\"]\n\n return instances", "def instances(self):\n return self.get('instances')", "def list_instances(self):\n nodes = self._driver.list_nodes()\n return [[n.name, n.state, n.public_ips] for n in nodes]", "def list_notebook_instances(NextToken=None, MaxResults=None, SortBy=None, SortOrder=None, NameContains=None, CreationTimeBefore=None, CreationTimeAfter=None, LastModifiedTimeBefore=None, LastModifiedTimeAfter=None, StatusEquals=None, NotebookInstanceLifecycleConfigNameContains=None, DefaultCodeRepositoryContains=None, AdditionalCodeRepositoryEquals=None):\n pass", "def _get_instance_profile(role_name, iam_client=None, region_name=None):\n if not iam_client:\n iam_client = boto3.client('iam', region_name=region_name)\n try:\n resp = iam_client.get_instance_profile(\n InstanceProfileName=role_name)\n instance_profile_arn = resp['InstanceProfile']['Arn']\n except botocore.exceptions.ClientError as err:\n instance_profile_arn = None\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'NoSuchEntity':\n raise\n return instance_profile_arn", "def _get_running_ec2_instances(theargs):\n mapstr = ''\n if theargs.profile is not None:\n boto3.setup_default_session(profile_name=theargs.profile)\n ec2 = boto3.client('ec2', region_name='us-west-2')\n\n response = ec2.describe_regions()\n for region in response['Regions']:\n rname = region['RegionName']\n sys.stdout.write('Running ec2 query in region: ' + rname + '\\n')\n ec2 = boto3.client('ec2', region_name=rname)\n mapstr += 'Region: ' + rname + '\\n'\n respy = ec2.describe_instances()\n for reso in respy['Reservations']:\n for entry in reso['Instances']:\n namey = ''\n try:\n for keyval in entry['Tags']:\n if keyval['Key'] == 'Name':\n namey = keyval['Value']\n break\n except KeyError:\n pass\n\n mapstr += ('\\t\\t' + entry['PublicDnsName'] + '\\n' +\n '\\t\\tLaunch Date: ' + str(entry['LaunchTime']) +\n '\\n' + \n '\\t\\tId: ' + entry['InstanceId'] + '\\n' +\n '\\t\\tType: ' + entry['InstanceType'] + '\\n' +\n '\\t\\tName: ' + namey + '\\n' +\n '\\t\\tState: ' + entry['State']['Name'] + '\\n\\n')\n sys.stdout.write('\\nResults:\\n\\n')\n return mapstr", "def instances(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstanceStatusArgs']]]]:\n return pulumi.get(self, \"instances\")", "def instances(request): #pylint: disable=unused-argument\n response = list()\n # Retrieve filtered parameter on GET. 
It's used to display all instances or just user instances\n # In all cases, if user is not superuser, only user instances are displayed\n if 'filtered' in request.GET:\n filtered = ast.literal_eval(request.GET['filtered'])\n else:\n # By default, we filter based on user_id\n filtered = True\n # By default, we just list user instances, not all instances\n if not request.user.is_superuser:\n # If user is superuser and user are requesting admin view of instances\n # We ask a full list of instances\n filtered = True\n\n # We retrieve data from backend\n data_instances = BACKEND.instances(request, filtered)\n data_users = BACKEND.users()\n data_projects = BACKEND.projects()\n\n # We merge user and project information w/ instances\n for data_instance in data_instances:\n try:\n project = \"{name}\".format(\n **data_projects[data_instances[data_instance]['project_id']])\n except KeyError:\n project = data_instances[data_instance]['project_id']\n\n try:\n user = \"{name}\".format(\n **data_users[data_instances[data_instance]['user_id']])\n except KeyError:\n user = data_instances[data_instance]['user_id']\n response.append({\n 'id': data_instances[data_instance]['id'],\n 'name': data_instances[data_instance]['name'],\n 'created_at': data_instances[data_instance]['created_at'],\n 'lease_end': data_instances[data_instance]['lease_end'],\n 'project': project,\n 'user': user\n })\n return JsonResponse(response, safe=False)", "def get_availability_zones(self, context, filters=None, fields=None,\n sorts=None, limit=None, marker=None,\n page_reverse=False):", "def getregion(self, *args, **kwargs):\n return _image.image_getregion(self, *args, **kwargs)", "def GetInstance(\n self,\n instance_name: str,\n resource_group_name: Optional[str] = None) -> 'AZComputeVirtualMachine':\n instances = self.ListInstances(resource_group_name=resource_group_name)\n if instance_name not in instances:\n raise errors.ResourceNotFoundError(\n 'Instance {0:s} was not found in subscription {1:s}'.format(\n instance_name, self.az_account.subscription_id), __name__)\n return instances[instance_name]", "def get_ec2_reservations(profile, running_filter):\n try:\n ec2_client = boto3.Session(profile_name=profile).client('ec2')\n except ProfileNotFound:\n print(\"Profile: %s not found\" % profile, file=sys.stderr)\n sys.exit(1)\n filtered_instances = ec2_client.describe_instances(Filters=running_filter)\n return filtered_instances['Reservations']", "def List(self, zone):\n project = properties.VALUES.core.project.Get(required=True)\n request = self.messages.ComputeInstancesListRequest(\n zone=zone, project=project)\n instances = list_pager.YieldFromList(\n service=self.client.instances,\n request=request,\n method='List',\n field='items')\n\n result_set = []\n for instance in instances:\n if self._VMCreatedByExecGroup(instance):\n result_set.append(instance)\n\n return result_set" ]
[ "0.65340656", "0.6458269", "0.6366381", "0.629105", "0.6246534", "0.5922886", "0.59018", "0.5643555", "0.56427896", "0.56168956", "0.5608682", "0.5606965", "0.55774665", "0.5557363", "0.55550456", "0.55415493", "0.554151", "0.5530633", "0.54847854", "0.54578453", "0.54482913", "0.54414165", "0.544037", "0.54121864", "0.53633773", "0.534497", "0.534028", "0.5338845", "0.5337171", "0.5336713", "0.5334364", "0.53264004", "0.53212565", "0.531351", "0.5308083", "0.52888674", "0.5284958", "0.5269275", "0.525645", "0.5254932", "0.52441925", "0.52428544", "0.52408123", "0.52342486", "0.52195746", "0.5174801", "0.51604795", "0.5160464", "0.51510537", "0.51260316", "0.51246214", "0.5109683", "0.5104918", "0.5097117", "0.5088802", "0.50874054", "0.5080086", "0.5073764", "0.5062834", "0.5056398", "0.50405365", "0.5033949", "0.50326306", "0.5013164", "0.50109", "0.5009431", "0.50072205", "0.49969715", "0.49879882", "0.49860045", "0.4983498", "0.49814904", "0.49765894", "0.49618402", "0.49607977", "0.49452078", "0.49443215", "0.49369094", "0.49326023", "0.4920249", "0.49187288", "0.49159122", "0.49154755", "0.4910125", "0.49100545", "0.49080744", "0.49044865", "0.49041796", "0.4899154", "0.4894491", "0.48921943", "0.48870346", "0.4879538", "0.48756737", "0.48737946", "0.48730484", "0.48726323", "0.48673704", "0.4861928", "0.48437214", "0.4841884" ]
0.0
-1
This operation is applicable only to general-purpose local-disk and dedicated local-disk instances. You can call this operation up to 30 times per minute. To call this operation at a higher frequency, use a Logstore. For more information, see [Manage a Logstore](~~48990~~).
def describe_error_log_records_with_options( self, request: dds_20151201_models.DescribeErrorLogRecordsRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.DescribeErrorLogRecordsResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.dbname): query['DBName'] = request.dbname if not UtilClient.is_unset(request.end_time): query['EndTime'] = request.end_time if not UtilClient.is_unset(request.node_id): query['NodeId'] = request.node_id if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.page_number): query['PageNumber'] = request.page_number if not UtilClient.is_unset(request.page_size): query['PageSize'] = request.page_size if not UtilClient.is_unset(request.resource_group_id): query['ResourceGroupId'] = request.resource_group_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.role_type): query['RoleType'] = request.role_type if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token if not UtilClient.is_unset(request.start_time): query['StartTime'] = request.start_time req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='DescribeErrorLogRecords', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.DescribeErrorLogRecordsResponse(), self.call_api(params, req, runtime) )
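A minimal usage sketch for the document above. It assumes the standard Alibaba Cloud Python SDK v2 package layout (alibabacloud_dds20151201, alibabacloud_tea_openapi, alibabacloud_tea_util) and the mongodb.aliyuncs.com endpoint; the credentials, instance ID, and time window are hypothetical placeholders, not values from this dataset.

# Hedged sketch: package paths and request field names are assumptions inferred
# from the SDK method above; all concrete values below are placeholders.
from alibabacloud_dds20151201.client import Client as DdsClient
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util import models as util_models

# Build a client for the target region.
config = open_api_models.Config(
    access_key_id='<access-key-id>',
    access_key_secret='<access-key-secret>',
    region_id='cn-hangzhou',
)
config.endpoint = 'mongodb.aliyuncs.com'  # assumed DDS endpoint
client = DdsClient(config)

# Query error-log records for one instance over a one-hour window.
request = dds_20151201_models.DescribeErrorLogRecordsRequest(
    dbinstance_id='dds-bpxxxxxxxxxxxxxx',  # placeholder instance ID
    start_time='2023-01-01T00:00Z',
    end_time='2023-01-01T01:00Z',
    page_size=30,
    page_number=1,
)
response = client.describe_error_log_records_with_options(
    request, util_models.RuntimeOptions())
print(response.body)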
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def monitorStore():\n # commented to use psutil system info system_info = systeminfo.get_all_info()\n\n system_info = nodeinfo.node_all()\n system_info ['monitored_timestamp'] = config.get_current_system_timestamp()\n\n # Attach sliver info to system info\n system_info.update(sliverinfo.collectAllDataAPI())\n\n s = shelve.open('log_shelf.db', writeback = True)\n\n while(1):\n try:\n try:\n if s.has_key('current_seq_number'):\n #Update current sequence number\n s['current_seq_number']+= 1\n current_seq = s['current_seq_number']\n else:\n current_seq = 1\n s['current_seq_number']= current_seq\n\n print(\"writing to file: \" + str(current_seq))\n\n # print(\"writing to file\" + str(system_info))\n s[str(current_seq)]= system_info\n\n\n finally:\n s.close()\n break\n\n except OSError:\n # In some research devices, the underlying dbm has a bug which needs to be handled explicitly\n print(\"Exception caught while handling shelve file!! OS Error: file not found. Trying again in 1 second\")\n time.sleep(1)\n continue\n\n delete_seen_entries()", "def log(msg):\n\n print('datastore: %s' % msg)", "def disable_disk_logging():\r\n app.set_option(_DISK_LOG_LEVEL_OPTION, LogOptions._LOG_LEVEL_NONE_KEY, force=True)", "def store_stream(data: DataStream):\n if data:\n try:\n c1 = datetime.now()\n CC.save_datastream(data,\"datastream\")\n e1 = datetime.now()\n CC.save_datastream_to_influxdb(data)\n i1 = datetime.now()\n print(\"Cassandra Time: \", e1-c1, \" Influx Time: \",i1-e1, \" Batch size: \",len(data.data))\n except:\n cc_log()", "def test_Drive_log_count(opencl_env: cldrive_env.OpenCLEnvironment):\n logs = opencl_kernel_driver.Drive(\n \"\"\"\nkernel void A(global int* a, global int* b) {\n a[get_global_id(0)] += b[get_global_id(0)];\n}\n\"\"\",\n 16,\n 16,\n opencl_env,\n 5,\n )\n assert len(logs) == 5", "def disk():\n run(env.disk_usage_command % env)", "def record_used(kind, hash):\n if os.path.exists(LOG_FILEPATH):\n log = open(os.path.join(ROOT, 'used'), 'a')\n else:\n log = open(os.path.join(ROOT, 'used'), 'w+')\n\n log.writelines([\"%s...%s\\n\" % (kind, hash)])", "def syslog_local_file(handle, admin_state, name, severity=\"emergencies\",\n size=\"40000\"):\n\n from ucsmsdk.mometa.comm.CommSyslogFile import CommSyslogFile\n\n mo = CommSyslogFile(parent_mo_or_dn=\"sys/svc-ext/syslog\", size=size,\n admin_state=admin_state,\n name=name,\n severity=severity)\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n return mo", "def check_oplog(con, warning, critical, perf_data):\n warning = warning or 24 \n critical = critical or 4\n try:\n db = con.local\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) \n if (db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) != None) :\n oplog = \"oplog.rs\";\n else :\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"})\n if (db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"}) != None) :\n oplog = \"oplog.$main\";\n else :\n message = \"neither master/slave nor replica set replication detected\";\n return check_levels(None,warning,critical,message)\n\n try:\n set_read_preference(con.admin)\n data=con.local.command(pymongo.son_manipulator.SON([('collstats',oplog)]))\n except:\n data = con.admin.command(son.SON([('collstats',oplog)]))\n\n ol_size=data['size']\n ol_storage_size=data['storageSize']\n ol_used_storage=int(float(ol_size)/ol_storage_size*100+1)\n ol=con.local[oplog]\n firstc = ol.find().sort(\"$natural\",pymongo.ASCENDING).limit(1)[0]['ts']\n lastc = 
ol.find().sort(\"$natural\",pymongo.DESCENDING).limit(1)[0]['ts']\n time_in_oplog= (lastc.as_datetime()-firstc.as_datetime())\n message=\"Oplog saves \"+ str(time_in_oplog) + \" %d%% used\" %ol_used_storage \n try: #work starting from python2.7\n hours_in_oplog= time_in_oplog.total_seconds()/60/60\n except:\n hours_in_oplog= float(time_in_oplog.seconds + time_in_oplog.days * 24 * 3600)/60/60\n approx_level=hours_in_oplog*100/ol_used_storage\n message+=performance_data(perf_data,[(\"%.2f\" % hours_in_oplog,'oplog_time',warning,critical),(\"%.2f \" % approx_level, 'oplog_time_100_percent_used')])\n return check_levels(-approx_level,-warning,-critical,message)\n\n except Exception, e:\n return exit_with_general_critical(e)", "def disk_log_level():\r\n if LogOptions._DISK_LOG_LEVEL is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_LEVEL", "def get_full_log(self):\n return self._get_log('full')", "def _record_storage_event(metric, value=0):\n command_name = properties.VALUES.metrics.command_name.Get()\n metrics.CustomKeyValue(command_name, 'Storage-' + metric, value)", "def get_logdb_quota():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<show><system><logdb-quota></logdb-quota></system></show>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def test_memory_pressure(self):\n self.execute_query(self.query)\n # This triggers a full GC as of openjdk 1.8.\n call([\"jmap\", \"-histo:live\", str(self.cluster.catalogd.get_pid())])\n # Sleep for logbufsecs=5 seconds to wait for the log to be flushed. Wait 5 more\n # seconds to reduce flakiness.\n time.sleep(10)\n assert self.metadata_cache_string not in self._get_catalog_object()", "def getLogs():", "def getLogs():", "def InsertLog():", "def GetLogs(self):\n raise NotImplementedError()", "def logmem(cls, tag):\n\n cls.logger.info(\"----%s----\", str(tag))\n mem = psutil.virtual_memory()\n cls.logger.info(\"total:%s M\", mem.total / 1024 / 1024)\n cls.logger.info(\"available:%s M\", mem.available / 1024 / 1024)\n cls.logger.info(\"used:%s M\", mem.used / 1024 / 1024)\n cls.logger.info(\"free:%s M\", mem.free / 1024 / 1024)\n cls.logger.info(\"active:%s M\", mem.active / 1024 / 1024)\n cls.logger.info(\"inactive:%s M\", mem.inactive / 1024 / 1024)\n cls.logger.info(\"buffers:%s M\", mem.buffers / 1024 / 1024)\n cls.logger.info(\"cached:%s M\", mem.cached / 1024 / 1024)\n cls.logger.info(\"shared:%s M\", mem.shared / 1024 / 1024)", "def upload_crashes(self, name, directory):\n logging.info('Not uploading crashes because no Filestore.')", "def logs(lines=50):\n run(f'journalctl --lines {lines} --unit addok --follow')", "def log(self, message):\n try:\n stat = os.stat(self.logpath)\n if stat.st_size >= 1000000:\n os.rename(self.logpath, self.logpath + '.1')\n except:\n pass\n logfile = open(self.logpath, 'a+')\n logfile.write(message + \"\\n\")\n logfile.close()", "def logs(name, namespace, region, start_time, end_time, offset, failed, tail):\n\n start, end = _align_time(start_time, end_time, offset, tail)\n client = ScfLogClient(name, namespace, region, failed)\n client.fetch_log(start, end, tail)", "def objectLogger(params):\n mode = params['mode']\n maxlen = params['maxlen']\n file_format = params['format']\n home_dir = params['storage_dir']\n server = Listener((params['ip'], int(params['port'])))\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n while True:\n conn = server.accept()\n while True:\n try:\n data 
= conn.recv()\n except EOFError:\n break\n if data:\n storage.append(data)\n else:\n storage.sync(id_generator())\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n conn.close()", "def _load_disk(self):", "def _load_disk(self):", "def redire_spark_logs(bigdl_type=\"float\", log_path=os.getcwd() + \"/bigdl.log\"):\n callBigDlFunc(bigdl_type, \"redirectSparkLogs\", log_path)", "def get_device_log(self, client):\r\n file_path = client.getDeviceLog()\r\n logging.info(str(time.asctime(time.localtime())) + \" Device Logs collected !! \" + file_path)\r\n #self.logger.info(\"Device logs collected at %s\" % file_path)\r\n return file_path", "def logs(self, container: Container) -> str:", "def log_store(self) -> str:\n return pulumi.get(self, \"log_store\")", "def on_sync(self):\r\n self.log()", "def collect_storage_metrics(sys):\n try:\n session = get_session()\n client = InfluxDBClient(host=INFLUXDB_HOSTNAME, port=INFLUXDB_PORT, database=INFLUXDB_DATABASE)\n\n sys_id = sys[\"id\"]\n sys_name = get_system_name(sys)\n\n json_body = list()\n\n # Get Drive statistics\n drive_stats_list = session.get((\"{}/{}/analysed-drive-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n drive_locations = get_drive_location(sys_id, session)\n if CMD.showDriveNames:\n for stats in drive_stats_list:\n location_send = drive_locations.get(stats[\"diskId\"])\n LOG.info((\"Tray{:02.0f}, Slot{:03.0f}\").format(location_send[0], location_send[1]))\n # Add Drive statistics to json body\n for stats in drive_stats_list:\n disk_location_info = drive_locations.get(stats[\"diskId\"])\n disk_item = dict(\n measurement = \"disks\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n sys_tray = (\"{:02.0f}\").format(disk_location_info[0]),\n sys_tray_slot = (\"{:03.0f}\").format(disk_location_info[1])\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in DRIVE_PARAMS\n )\n )\n if CMD.showDriveMetrics:\n LOG.info(\"Drive payload: %s\", disk_item)\n json_body.append(disk_item)\n\n # Get interface statistics\n interface_stats_list = session.get((\"{}/{}/analysed-interface-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showInterfaceNames:\n for stats in interface_stats_list:\n LOG.info(stats[\"interfaceId\"])\n # Add interface statistics to json body\n for stats in interface_stats_list:\n if_item = dict(\n measurement = \"interface\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n interface_id = stats[\"interfaceId\"],\n channel_type = stats[\"channelType\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in INTERFACE_PARAMS\n )\n )\n if CMD.showInterfaceMetrics:\n LOG.info(\"Interface payload: %s\", if_item)\n json_body.append(if_item)\n\n # Get System statistics\n system_stats_list = session.get((\"{}/{}/analysed-system-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n # Add System statistics to json body\n sys_item = dict(\n measurement = \"systems\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name\n ),\n fields = dict(\n (metric, system_stats_list.get(metric)) for metric in SYSTEM_PARAMS\n )\n )\n if CMD.showSystemMetrics:\n LOG.info(\"System payload: %s\", sys_item)\n json_body.append(sys_item)\n \n # Get Volume statistics\n volume_stats_list = session.get((\"{}/{}/analysed-volume-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showVolumeNames:\n for stats in volume_stats_list:\n LOG.info(stats[\"volumeName\"])\n # Add Volume statistics to json body\n for stats in 
volume_stats_list:\n vol_item = dict(\n measurement = \"volumes\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n vol_name = stats[\"volumeName\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in VOLUME_PARAMS\n )\n )\n if CMD.showVolumeMetrics:\n LOG.info(\"Volume payload: %s\", vol_item)\n json_body.append(vol_item)\n\n if not CMD.doNotPost:\n client.write_points(json_body, database=INFLUXDB_DATABASE, time_precision=\"s\")\n\n except RuntimeError:\n LOG.error((\"Error when attempting to post statistics for {}/{}\").format(sys[\"name\"], sys[\"id\"]))", "def reset_used():\n with open(LOG_FILEPATH, 'w+') as logfile:\n pass", "def docker_logs():\n local('docker logs {} -f'.format(project_name))", "def test_slf_notastring():\n oldlogfile = get_logfile()\n start_logfile(1.0)\n start_logfile(True)\n set_logfile(oldlogfile)", "def set_disk_log_level(log_level):\r\n LogOptions._DISK_LOG_SCHEME, LogOptions._DISK_LOG_LEVEL = (\r\n LogOptions._parse_loglevel(log_level, scheme='google'))", "def test_slf_basic():\n oldlogfile = get_logfile()\n with tempfile.TemporaryDirectory() as tmp:\n logfile = os.path.join(tmp, 'log.txt')\n assert ~os.path.exists(logfile)\n start_logfile(logfile)\n assert os.path.exists(logfile)\n set_logfile(oldlogfile)", "def _performance_log(func):\n\n def wrapper(*arg):\n \"\"\" wrapper \"\"\"\n\n start = datetime.datetime.now()\n\n # Code execution\n res = func(*arg)\n\n if _log_performance:\n usage = resource.getrusage(resource.RUSAGE_SELF)\n memory_process = (usage[2])/1000\n\n delta = datetime.datetime.now() - start\n delta_milliseconds = int(delta.total_seconds() * 1000)\n\n _logger.info(\"PERFORMANCE - {0} - milliseconds |{1:>8,}| - memory MB |{2:>8,}|\"\n .format(func.__name__,\n delta_milliseconds,\n memory_process))\n\n return res\n\n return wrapper", "def log(length, file):\n\n if user_init.check_pre_init() and user_utility.check_drive_init() == 'True':\n \n data = user_utility.read_log(length, file)\n\n for log in data:\n print(log)\n\n\n else:\n user_utility.print_error(\"Sink folder not Found! 
Initialise folder first or reset your configuration!\")", "def logprllwrite(self):\n sql = '''select to_char(time_waited, 'FM99999999999999990') retvalue \n from v$system_event se, v$event_name en where se.event(+) \n = en.name and en.name = 'log files parallel write' '''\n self.cur.execute(sql)\n res = self.cur.fetchall()\n for i in res:\n print(i[0])", "def init_log():\n log_op = LogOp()\n log_op.config_log(logging.DEBUG)", "def logManager(self):\n time.sleep(0.1)\n while True:\n try:\n time.sleep(0.2)\n data = self.logQ.get(block=False)\n except Queue.Empty:\n pass\n else:\n try:\n self.log_lock.acquire() \n self.log_file.logEntry(data)\n time.sleep(0.1)\n self.log_lock.release()\n except:\n print '*Unable to write to log file*'", "def startLogFiles():\n #global techLog\n global userl\n _generic_startLogFiles(True)", "def _load_disk(self):\r\n pass", "def log_free_disk_space():\n cmd = 'df -h'\n p = Popen(cmd, shell=True, stdout=PIPE)\n res = p.communicate()\n if res[0]:\n res = res[0]\n else:\n res = res[1]\n logger.warning('Disk usage statisticks:')\n logger.warning(res)", "def test02StoreRefresh(self):\n s = utils.FastStore(max_size=5)\n keys = []\n for i in range(0, 5):\n keys.append(s.Put(i, i))\n\n # This should not raise because keys[0] should be refreshed each time its\n # gotten\n for i in range(0, 1000):\n s.Get(keys[0])\n s.Put(i, i)", "def log_time(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n start = time.time()\n result = func(*args, **kwargs)\n end = time.time()\n f = open(\"log_dev.txt\",'a',encoding=\"utf8\")\n time_res = end - start\n f.write(\"\\n\"+func.__name__+ \" time = \" + str(time_res))\n return result\n\n return wrapper", "def checkLogs():\n run(\"cat /etc/chariot/logs\")#not sure if this is cat-able", "def store(**kwargs):\r\n stored = AppLog(**kwargs)\r\n DBSession.add(stored)", "def test_compute_persistence(perfectModelEnsemble_initialized_control):\n perfectModelEnsemble_initialized_control._compute_persistence(metric=\"acc\")", "def collect_logs(self, log_dir, label=None, min_t=100, max_t=200):\n pass", "def test_backup_with_update_on_disk_of_snapshot_markers(self):\n version = RestConnection(self.backupset.backup_host).get_nodes_version()\n if version[:5] == \"6.5.0\":\n self.log.info(\"\\n\\n******* Due to issue in MB-36904, \\\n \\nthis test will be skipped in 6.5.0 ********\\n\")\n return\n gen1 = BlobGenerator(\"ent-backup1\", \"ent-backup-\", self.value_size, end=100000)\n gen2 = BlobGenerator(\"ent-backup2\", \"ent-backup-\", self.value_size, end=100000)\n gen3 = BlobGenerator(\"ent-backup3\", \"ent-backup-\", self.value_size, end=100000)\n rest_conn = RestConnection(self.backupset.cluster_host)\n rest_conn.create_bucket(bucket=\"bucket0\", ramQuotaMB=1024)\n self.buckets = rest_conn.get_buckets()\n authentication = \"-u Administrator -p password\"\n\n self._load_all_buckets(self.master, gen1, \"create\", 0)\n self.log.info(\"Stop persistent\")\n cluster_nodes = rest_conn.get_nodes()\n clusters = copy.deepcopy(cluster_nodes)\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n for node in clusters:\n shell.execute_command(\"%scbepctl%s %s:11210 -b %s stop %s\" % \\\n (self.cli_command_location,\n self.cmd_ext,\n node.ip,\n \"bucket0\",\n authentication))\n shell.disconnect()\n self.log.info(\"Load 2nd batch docs\")\n self._load_all_buckets(self.master, gen2, \"create\", 0)\n self.log.info(\"Run full backup with cbbackupwrapper\")\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n backup_dir = 
self.tmp_path + \"backup\" + self.master.ip\n shell.execute_command(\"rm -rf %s\" % backup_dir)\n shell.execute_command(\"mkdir %s\" % backup_dir)\n shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m full %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n self.log.info(\"Load 3rd batch docs\")\n self._load_all_buckets(self.master, gen3, \"create\", 0)\n self.log.info(\"Run diff backup with cbbackupwrapper\")\n output, _ = shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m diff %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n\n if output and \"SUCCESSFULLY COMPLETED\" not in output[1]:\n self.fail(\"Failed to backup as the fix in MB-25727\")\n shell.disconnect()", "def test_errors_on_log(self):\n mb = self.maria_backup\n self.assertFalse(mb.errors_on_log()) # at the moment, xtrabackup execution does not generate disk logs", "def add_log(self, log):\n log = str(datetime.datetime.now()) + \": \"+log+\"\\n\"\n print(log)\n self.logs.append(log)\n if len(self.logs) > 10:\n self.append_to_logfile()", "def log_scalar(name, value, step, autolog):\n if not autolog:\n mlflow.log_metric(name, value)", "def recordLogsToList(log):\n print log\n# global LOGLIST\n LOGLIST.append(log)", "def on_L1(self):\r\n self.log()", "def on_L2(self):\r\n self.log()", "def log_all(self):\n self.save_raw()\n self.log()", "def logs_directory(self):", "def test_log_links(self):\n self.host = synthetic_host(\"myserver-with-nids\", [Nid.Nid(\"192.168.0.1\", \"tcp\", 0)])\n self.create_simple_filesystem(self.host)\n fake_log_message(\"192.168.0.1@tcp testfs-MDT0000\")\n response = self.api_client.get(\"/api/log/\")\n (event,) = self.deserialize(response)[\"objects\"]\n self.assertEqual(len(event[\"substitutions\"]), 2)\n self.host.state = \"removed\"\n self.host.save()\n self.mdt.not_deleted = False\n self.mdt.save()\n response = self.api_client.get(\"/api/log/\")\n (event,) = self.deserialize(response)[\"objects\"]\n self.assertEqual(len(event[\"substitutions\"]), 0)", "def log(string):\n\n print string\n\n# data and time\n dt = datetime.now().strftime(\"%b %d %H:%M:%S\")\n\n# check if log file exist / if not create it\n check_logf = os.path.isfile(logfile)\n if check_logf == False:\n os.system(\"touch %s\" % (logfile))\n firstlog = \"%s %s jadm: jadm log file was created!\" % (dt, os.uname()[1])\n os.system(\"echo '%s' > %s\" % (firstlog, logfile))\n\n# applay string to log file\n string = \"%s %s jadm: %s\" % (dt, os.uname()[1], string)\n os.system(\"echo '%s' >> %s\" % (string, logfile))", "def test_data_store_local_index(self, tcex: TcEx):\n tcex.api.tc.v2.datastore('local', self.data_type)", "def createLogicalVolume(self, vg, filesystem, name, mountpoint, size):\n lv = {}\n lv['command'] = 'create:logvol'\n lv['vg'] = vg\n lv['fs'] = filesystem\n lv['size'] = size - EXTENT_SIZE + 1\n lv['name'] = name\n lv['mountPoint'] = mountpoint\n lv['format'] = 'yes'\n\n return lv", "def disk_log_scheme():\r\n if LogOptions._DISK_LOG_SCHEME is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_SCHEME", "def test_cbbackupmgr_collect_logs(self):\n if \"5.5\" > self.cb_version[:3]:\n self.fail(\"This test is only for cb version 5.5 and later. 
\")\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster()\n self._collect_logs()", "def log_runtime(label, mean_time, std, instances):\n pass", "def do_disk_event(client, args):\n args.type = 'disk'\n do_event_show(client, args)", "def show_bigdl_info_logs(bigdl_type=\"float\"):\n callBigDlFunc(bigdl_type, \"showBigDlInfoLogs\")", "def logs(self, **options):\n cmd = self.get_scalingo_prefix_cmd()\n cmd += \" logs -f\"\n for name, val in options.items():\n if len(name) == 1:\n cmd += f\" -{name}\"\n else:\n cmd += f\" --{name}\"\n if val:\n cmd += f\" {val}\"\n if self.env_name == \"local\":\n raise ValueError(\"No logs on local environment\")\n else:\n asyncio.run(self.async_run(cmd))", "def store_data(batch_df: DataFrame, output_delta_lake_path):\n batch_df.select(col(\"MeterId\"),\n col(\"SupplierId\"),\n col(\"Measurement\"),\n col(\"ObservationTime\")) \\\n .repartition(\"SupplierId\") \\\n .write \\\n .partitionBy(\"SupplierId\") \\\n .format(\"delta\") \\\n .mode(\"append\") \\\n .save(output_delta_lake_path)", "def logentry(jobid, label, typ, content=None, path=None):\n ud = str(uuid.uuid4())\n db.logs.save({\"uuid\":ud, \"jobid\":jobid, \"label\":label, \"type\":typ, \"content\":content, \"date\":tstamp(), \"containerurl\":path})", "def dump_to_log(self):\n # self._send_request(\"/dumpToLog\")\n pass", "def load(lb_id, backend='memory'):\n return b_api.fetch(backend).logbook_get(lb_id)", "def store(status, message, **kwargs):\r\n LogRecord(status, message, **kwargs)", "def log_mounts_info(mounts):\n if isinstance(mounts, str):\n mounts = [mounts]\n\n g.log.info(\"Start logging mounts information:\")\n for mount_obj in mounts:\n g.log.info(\"Information of mount %s:%s\", mount_obj.client_system,\n mount_obj.mountpoint)\n # Mount Info\n g.log.info(\"Look For Mountpoint:\\n\")\n cmd = \"mount | grep %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Disk Space Usage\n g.log.info(\"Disk Space Usage Of Mountpoint:\\n\")\n cmd = \"df -h %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Long list the mountpoint\n g.log.info(\"List Mountpoint Entries:\\n\")\n cmd = \"ls -ld %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Stat mountpoint\n g.log.info(\"Mountpoint Status:\\n\")\n cmd = \"stat %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)", "def putlog(self,s):\n if not self.logqueue == None:\n# print s\n self.logqueue.put(\"Spectrum: (\"+time.ctime()+\"):\\n\"+s)", "def set_size_based_rotating_log(log_path=LOGGER_PATH, log_name=LOGGER_FILENAME):\n\n # Checks if the path exists otherwise tries to create it\n if not os.path.exists(log_path):\n try:\n os.makedirs(log_path)\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n print(exc_type, exc_value, exc_traceback)\n\n # Sets the format and level of logging records\n format = '%(asctime)s - %(levelname)s - [%(pathname)s:%(lineno)s - %(funcName)10s() ] - %(message)s'\n formatter = Formatter(format)\n level=logging.DEBUG\n\n # Does basic configuration for the logging system\n logging.basicConfig(format=format, level=level)\n\n # Instantiates a logger object\n logger = logging.getLogger(\"\")\n\n handler = logging.handlers.RotatingFileHandler(filename=log_path+'/'+log_name,\n maxBytes=LOGGER_MAX_SIZE,\n backupCount=LOGGER_MAX_FILES)\n 
handler.setFormatter(formatter)\n handler.rotator = rotator\n handler.namer = namer\n logger.addHandler(handler)\n return logger", "def test_logging_log_rotate_for_mysql(\n self,\n admin_node,\n k8s_actions,\n show_step,\n os_deployed):\n logger.info('Log rotate for mysql')\n log_path = '/var/log/ccp/mysql/'\n log_file = 'mysql.log'\n\n show_step(1)\n # get cron pod\n cron_pod = [pod for pod in k8s_actions.api.pods.list(\n namespace=ext.Namespace.BASE_NAMESPACE)\n if 'cron-' in pod.name][0]\n # clean files\n utils.rm_files(admin_node, cron_pod, log_path + log_file + '*')\n\n show_step(2)\n for day in range(0, 8):\n utils.create_file(admin_node, cron_pod, log_path + log_file, 110)\n utils.run_daily_cron(admin_node, cron_pod, 'logrotate')\n sleep(5)\n\n show_step(3)\n log_files = utils.list_files(\n admin_node, cron_pod, log_path, log_file + '*')\n assert len(log_files) == 7,\\\n \"Count of log files after rotation is wrong. \" \\\n \"Expected {} Actual {}\".format(log_files, 7)", "def log(cmdDict, kind, error=None):\n\n cmdDict['timestamp'] = str(int(time.time() * 1000))\n cmdDict['logType'] = kind\n cmdDict['_id'] = cmdDict['transactionNumber'] + cmdDict['user'] + cmdDict['command'] + cmdDict['timestamp'] + cmdDict['logType']\n\n if (error != None):\n cmdDict['errorMessage'] = error\n\n try:\n Database.insert(TRANSACT_COLLECT, cmdDict)\n\n except pymongo.errors.PyMongoError as err:\n print(f\"ERROR! Could not log command into database. Failed with: {err}\")", "def upload_logs():\n return {\n 'page': 'upload_logs',\n 'raw_logs': '',\n }", "def logMemoryStats():\n class MemoryStatusEx(ctypes.Structure):\n \"\"\" MEMORYSTATUSEX \"\"\"\n kaFields = [\n ( 'dwLength', ctypes.c_ulong ),\n ( 'dwMemoryLoad', ctypes.c_ulong ),\n ( 'ullTotalPhys', ctypes.c_ulonglong ),\n ( 'ullAvailPhys', ctypes.c_ulonglong ),\n ( 'ullTotalPageFile', ctypes.c_ulonglong ),\n ( 'ullAvailPageFile', ctypes.c_ulonglong ),\n ( 'ullTotalVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailExtendedVirtual', ctypes.c_ulonglong ),\n ];\n _fields_ = kaFields; # pylint: disable=invalid-name\n\n def __init__(self):\n super(MemoryStatusEx, self).__init__();\n self.dwLength = ctypes.sizeof(self);\n\n try:\n oStats = MemoryStatusEx();\n ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(oStats));\n except:\n reporter.logXcpt();\n return False;\n\n reporter.log('Memory statistics:');\n for sField, _ in MemoryStatusEx.kaFields:\n reporter.log(' %32s: %s' % (sField, getattr(oStats, sField)));\n return True;", "def get_dismount_mount_record(r, start_time):\n retv = {'state': 'M',\n 'node': r['node'],\n 'volume': r['volume'],\n 'type': r['type'],\n 'logname': r['logname'],\n 'start': start_time,\n 'finish': timeutil.wind_time(start_time, seconds=20, backward=False),\n 'storage_group': 'PROBE_UTILITY',\n 'reads': 0,\n 'writes':0\n }\n return retv", "def azure_fetch_and_push_to_influx(token, azure_data_store, file_path, tag_map, influx_settings):\n df = read_df_from_azure(azure_data_store, file_path, token)\n (host, port, user, password, db_name, batch_size) = influx_settings\n #df=df.resample('1Min').mean()\n write_to_influx(df, tag_map, host, port, user, password, db_name, batch_size)", "def test_data_store_local_new_index(tcex: TcEx):\n data = {'one': 1}\n key = str(uuid.uuid4())\n rid = 'one'\n\n ds = tcex.api.tc.v2.datastore('local', key)\n\n results = ds.add(rid=rid, data=data)\n if results is None:\n assert False, 'datastore add returned None'\n assert results.get('_id') == rid\n assert 
results.get('_shards', {}).get('successful') == 1", "def test_metering_database(self):\n self.with_deletion = self.input.param(\"delete\", False)\n self.db_name = \"%s-testmetering\" % self.db_name\n # validate initial throughput is 5000/3 = 1666\n for bucket in self.cluster.buckets:\n print(bucket.servers)\n self.assertEqual(self.bucket_util.get_throttle_limit(bucket),\n self.bucket_throttling_limit)\n\n # validate create, update, delete stat\n for op_type in [\"create\", \"update\"]:\n if op_type == \"create\":\n self.load_data(create_start=0, create_end=self.num_items, create_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if op_type == \"update\":\n self.load_data(update_start=0, update_end=self.num_items, update_perc=100, mutated=1)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if self.with_deletion:\n self.log.info(\"performing delete operation\")\n self.load_data(delete_start=0, delete_end=self.num_items, delete_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)", "def on_L3(self):\r\n self.log()", "def send_logs(self):\n for i in range(30):\n with open('{}-{}.log'.format(self._log_file_path, i), 'a') as log_file:\n for _ in range(self._log_rate):\n log_file.write(self._log_record + '\\n')", "def fetch_data_logger(fn):\n @functools.wraps(fn)\n def wrapper():\n\n data = fn()\n\n create_or_update_log(data, LOG_TYPES['D'])\n\n return data\n return wrapper", "def list_log_files(fs, devices, start_times, verbose=True, passwords={}):\n import canedge_browser\n\n log_files = []\n\n if len(start_times):\n for idx, device in enumerate(devices):\n start = start_times[idx]\n log_files_device = canedge_browser.get_log_files(fs, [device], start_date=start, passwords=passwords)\n log_files.extend(log_files_device)\n\n if verbose:\n print(f\"Found {len(log_files)} log files\\n\")\n\n return log_files", "def main():\n global tar_file_descr\n\n help_msg = 'Usage: log_collector.py <all | host1[,host2,host3...]>'\n hosts = []\n if len(sys.argv) == 2:\n if '-h' == sys.argv[1] or '--help' == sys.argv[1]:\n print(help_msg)\n sys.exit(0)\n elif 'all' == sys.argv[1]:\n # get logs from all hosts\n hosts = []\n host_objs = CLIENT.host_get_all()\n for host_obj in host_objs:\n hosts.append(host_obj.name)\n else:\n # get logs from specified hosts\n hostnames = sys.argv[1].split(',')\n for host in hostnames:\n if host not in hosts:\n hosts.append(host)\n else:\n print(help_msg)\n sys.exit(1)\n\n # open tar file for storing logs\n fd, tar_path = tempfile.mkstemp(prefix='kolla_support_logs_',\n suffix='.tgz')\n os.close(fd) # avoid fd leak\n\n with tarfile.open(tar_path, 'w:gz') as tar_file_descr:\n # clear out old logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n os.mkdir(LOGDIR)\n\n # gather logs from selected hosts\n try:\n for host in hosts:\n get_logs_from_host(host)\n\n # tar up all the container logs\n tar_file_descr.add(LOGDIR, arcname='container_logs')\n finally:\n # remove uncompressed logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n\n # gather dump output from kolla-cli\n dump_kolla_info()\n\n print('Log collection complete. 
Logs are at %s' % tar_path)", "def log_store(self) -> Optional[str]:\n return pulumi.get(self, \"log_store\")", "def __enable_log_streaming():\n # Enable BigQuery service for the project:\n run_command('gcloud services enable bigquery --project {}'.format(PROJECT_ID))\n\n # Create the BigQuery dataset to store the logs:\n # For details refer to https://cloud.google.com/bigquery/docs/datasets#bigquery-create-dataset-cli\n run_command('bq mk --data_location {} --description \\\"Cloud logging export.\\\" {}'\n .format(LOGS_LOCATION, LOGS_SINK_DATASET_ID), 'already exists')\n\n # Set up a log sink to the above-created BigQuery dataset:\n output_message = run_command('gcloud logging sinks create {} {} --project {} \\\n --log-filter=\\'resource.type=\\\"gcs_bucket\\\" OR resource.type=\\\"project\\\"\\''\n .format(LOGS_SINK_NAME, LOGS_SINK_DESTINATION, PROJECT_ID))\n\n # The service account that will be writing to BQ dataset is listed by the previous command.\n # After extracting the service account from the message, you need to give it BQ Writer role to that service account.\n __set_dataset_access( __find_service_account_in_message(output_message) )", "def get_storage_info(self, address: str, level: StorageLevel):", "def local(self, text):\n\t\tlogf = open(\"update_log.txt\", \"a\")\n\t\tdate = datetime.datetime.now()\n\t\tlogf.writelines(\"[\" + str(date) + \"] \" + text + \"\\n\")\n\t\tlogf.close()", "def log_it(logdata=None):\n with open(\"bloomhack.log\", \"a\") as fp:\n fp.write(logdata)\n return", "def node_storage(self, node):\n def listdir(path, only_dirs=False):\n ents = node.account.sftp_client.listdir(path)\n if not only_dirs:\n return ents\n paths = map(lambda fn: (fn, os.path.join(path, fn)), ents)\n return [p[0] for p in paths if node.account.isdir(p[1])]\n\n store = NodeStorage(RedpandaService.DATA_DIR)\n for ns in listdir(store.data_dir, True):\n if ns == '.coprocessor_offset_checkpoints':\n continue\n ns = store.add_namespace(ns, os.path.join(store.data_dir, ns))\n for topic in listdir(ns.path):\n topic = ns.add_topic(topic, os.path.join(ns.path, topic))\n for num in listdir(topic.path):\n partition = topic.add_partition(\n num, node, os.path.join(topic.path, num))\n partition.add_files(listdir(partition.path))\n return store", "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))", "def snapshot(snapshot_type, result_q, time_delta):", "def _log_process(ip_or_url,\n username,\n password,\n database,\n series_name,\n port=8086,\n polling_interval=1,\n retention_duration=MEASUREMENTS_RETENTION_DURATION,\n **tags):\n logger = _logger()\n logger.info('Logging GPU to Database {}'.format(ip_or_url))\n\n kwargs = {'series_name': series_name,\n 'polling_interval': polling_interval,\n 'retention_duration': retention_duration}\n kwargs.update(tags)\n p = Process(target=_start_logger_process,\n args=(ip_or_url,\n port,\n username,\n password,\n database),\n kwargs=kwargs)\n p.start()\n yield p\n p.terminate()\n p.join()", "def test_start_new_log(self):\n old_log_file_name = self.device.log_file_name\n 
self.device.start_new_log(log_name_prefix=self.get_log_suffix())\n self.assertNotEqual(\n old_log_file_name, self.device.log_file_name,\n f\"Expected log file name to change from {old_log_file_name}\")\n self.assertTrue(\n os.path.exists(old_log_file_name),\n f\"Expected old log file name {old_log_file_name} to exist\")\n self.assertTrue(\n os.path.exists(self.device.log_file_name),\n f\"Expected new log file name {self.device.log_file_name} to exist\")" ]
[ "0.6248957", "0.6004453", "0.53603053", "0.53381205", "0.5311462", "0.5291663", "0.5282635", "0.5279799", "0.52787983", "0.5260151", "0.52150124", "0.5205798", "0.5137791", "0.5104536", "0.5094829", "0.5094829", "0.5089645", "0.5080881", "0.5069531", "0.5067983", "0.5049315", "0.5047344", "0.50428", "0.50336874", "0.5028792", "0.5028792", "0.50232273", "0.5017229", "0.50001866", "0.4975056", "0.49695352", "0.49651346", "0.4959732", "0.49485096", "0.49201605", "0.491775", "0.4906063", "0.4901353", "0.48973086", "0.48958024", "0.48914644", "0.48892727", "0.48823112", "0.48804468", "0.4877903", "0.48676512", "0.48655146", "0.48638356", "0.48596066", "0.48569125", "0.48556426", "0.48419136", "0.48412684", "0.48380032", "0.48372665", "0.48348084", "0.48249245", "0.48086706", "0.4807052", "0.4804408", "0.48027894", "0.47964686", "0.47765866", "0.4768357", "0.475868", "0.47576597", "0.474328", "0.4735613", "0.47350103", "0.47318107", "0.47237858", "0.47210956", "0.47131303", "0.47130087", "0.47083464", "0.47016108", "0.47003374", "0.4697505", "0.46934873", "0.46901193", "0.4685906", "0.46849328", "0.46755195", "0.46703953", "0.46671495", "0.4667117", "0.46545303", "0.46534565", "0.46448874", "0.46371803", "0.46359396", "0.4634748", "0.4631148", "0.46283913", "0.4627094", "0.46101946", "0.45972055", "0.45956996", "0.45937267", "0.45925382", "0.45902616" ]
0.0
-1
This operation is applicable only to general-purpose local-disk and dedicated local-disk instances. You can call this operation up to 30 times per minute. To call this operation at a higher frequency, use a Logstore. For more information, see [Manage a Logstore](~~48990~~).
async def describe_error_log_records_with_options_async( self, request: dds_20151201_models.DescribeErrorLogRecordsRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.DescribeErrorLogRecordsResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.dbname): query['DBName'] = request.dbname if not UtilClient.is_unset(request.end_time): query['EndTime'] = request.end_time if not UtilClient.is_unset(request.node_id): query['NodeId'] = request.node_id if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.page_number): query['PageNumber'] = request.page_number if not UtilClient.is_unset(request.page_size): query['PageSize'] = request.page_size if not UtilClient.is_unset(request.resource_group_id): query['ResourceGroupId'] = request.resource_group_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.role_type): query['RoleType'] = request.role_type if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token if not UtilClient.is_unset(request.start_time): query['StartTime'] = request.start_time req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='DescribeErrorLogRecords', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.DescribeErrorLogRecordsResponse(), await self.call_api_async(params, req, runtime) )
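The async variant above is invoked the same way inside an event loop. A short sketch under the same assumptions as the synchronous example (client, dds_20151201_models, and util_models come from that earlier setup; values remain placeholders):

import asyncio

async def fetch_error_logs(client):
    # Same request shape as the synchronous sketch; values are placeholders.
    request = dds_20151201_models.DescribeErrorLogRecordsRequest(
        dbinstance_id='dds-bpxxxxxxxxxxxxxx',
        start_time='2023-01-01T00:00Z',
        end_time='2023-01-01T01:00Z',
    )
    response = await client.describe_error_log_records_with_options_async(
        request, util_models.RuntimeOptions())
    return response.body

# asyncio.run(fetch_error_logs(client))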
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def monitorStore():\n # commented to use psutil system info system_info = systeminfo.get_all_info()\n\n system_info = nodeinfo.node_all()\n system_info ['monitored_timestamp'] = config.get_current_system_timestamp()\n\n # Attach sliver info to system info\n system_info.update(sliverinfo.collectAllDataAPI())\n\n s = shelve.open('log_shelf.db', writeback = True)\n\n while(1):\n try:\n try:\n if s.has_key('current_seq_number'):\n #Update current sequence number\n s['current_seq_number']+= 1\n current_seq = s['current_seq_number']\n else:\n current_seq = 1\n s['current_seq_number']= current_seq\n\n print(\"writing to file: \" + str(current_seq))\n\n # print(\"writing to file\" + str(system_info))\n s[str(current_seq)]= system_info\n\n\n finally:\n s.close()\n break\n\n except OSError:\n # In some research devices, the underlying dbm has a bug which needs to be handled explicitly\n print(\"Exception caught while handling shelve file!! OS Error: file not found. Trying again in 1 second\")\n time.sleep(1)\n continue\n\n delete_seen_entries()", "def log(msg):\n\n print('datastore: %s' % msg)", "def disable_disk_logging():\r\n app.set_option(_DISK_LOG_LEVEL_OPTION, LogOptions._LOG_LEVEL_NONE_KEY, force=True)", "def store_stream(data: DataStream):\n if data:\n try:\n c1 = datetime.now()\n CC.save_datastream(data,\"datastream\")\n e1 = datetime.now()\n CC.save_datastream_to_influxdb(data)\n i1 = datetime.now()\n print(\"Cassandra Time: \", e1-c1, \" Influx Time: \",i1-e1, \" Batch size: \",len(data.data))\n except:\n cc_log()", "def test_Drive_log_count(opencl_env: cldrive_env.OpenCLEnvironment):\n logs = opencl_kernel_driver.Drive(\n \"\"\"\nkernel void A(global int* a, global int* b) {\n a[get_global_id(0)] += b[get_global_id(0)];\n}\n\"\"\",\n 16,\n 16,\n opencl_env,\n 5,\n )\n assert len(logs) == 5", "def disk():\n run(env.disk_usage_command % env)", "def record_used(kind, hash):\n if os.path.exists(LOG_FILEPATH):\n log = open(os.path.join(ROOT, 'used'), 'a')\n else:\n log = open(os.path.join(ROOT, 'used'), 'w+')\n\n log.writelines([\"%s...%s\\n\" % (kind, hash)])", "def syslog_local_file(handle, admin_state, name, severity=\"emergencies\",\n size=\"40000\"):\n\n from ucsmsdk.mometa.comm.CommSyslogFile import CommSyslogFile\n\n mo = CommSyslogFile(parent_mo_or_dn=\"sys/svc-ext/syslog\", size=size,\n admin_state=admin_state,\n name=name,\n severity=severity)\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n return mo", "def check_oplog(con, warning, critical, perf_data):\n warning = warning or 24 \n critical = critical or 4\n try:\n db = con.local\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) \n if (db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) != None) :\n oplog = \"oplog.rs\";\n else :\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"})\n if (db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"}) != None) :\n oplog = \"oplog.$main\";\n else :\n message = \"neither master/slave nor replica set replication detected\";\n return check_levels(None,warning,critical,message)\n\n try:\n set_read_preference(con.admin)\n data=con.local.command(pymongo.son_manipulator.SON([('collstats',oplog)]))\n except:\n data = con.admin.command(son.SON([('collstats',oplog)]))\n\n ol_size=data['size']\n ol_storage_size=data['storageSize']\n ol_used_storage=int(float(ol_size)/ol_storage_size*100+1)\n ol=con.local[oplog]\n firstc = ol.find().sort(\"$natural\",pymongo.ASCENDING).limit(1)[0]['ts']\n lastc = 
ol.find().sort(\"$natural\",pymongo.DESCENDING).limit(1)[0]['ts']\n time_in_oplog= (lastc.as_datetime()-firstc.as_datetime())\n message=\"Oplog saves \"+ str(time_in_oplog) + \" %d%% used\" %ol_used_storage \n try: #work starting from python2.7\n hours_in_oplog= time_in_oplog.total_seconds()/60/60\n except:\n hours_in_oplog= float(time_in_oplog.seconds + time_in_oplog.days * 24 * 3600)/60/60\n approx_level=hours_in_oplog*100/ol_used_storage\n message+=performance_data(perf_data,[(\"%.2f\" % hours_in_oplog,'oplog_time',warning,critical),(\"%.2f \" % approx_level, 'oplog_time_100_percent_used')])\n return check_levels(-approx_level,-warning,-critical,message)\n\n except Exception, e:\n return exit_with_general_critical(e)", "def disk_log_level():\r\n if LogOptions._DISK_LOG_LEVEL is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_LEVEL", "def get_full_log(self):\n return self._get_log('full')", "def _record_storage_event(metric, value=0):\n command_name = properties.VALUES.metrics.command_name.Get()\n metrics.CustomKeyValue(command_name, 'Storage-' + metric, value)", "def get_logdb_quota():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<show><system><logdb-quota></logdb-quota></system></show>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def test_memory_pressure(self):\n self.execute_query(self.query)\n # This triggers a full GC as of openjdk 1.8.\n call([\"jmap\", \"-histo:live\", str(self.cluster.catalogd.get_pid())])\n # Sleep for logbufsecs=5 seconds to wait for the log to be flushed. Wait 5 more\n # seconds to reduce flakiness.\n time.sleep(10)\n assert self.metadata_cache_string not in self._get_catalog_object()", "def getLogs():", "def getLogs():", "def InsertLog():", "def GetLogs(self):\n raise NotImplementedError()", "def logmem(cls, tag):\n\n cls.logger.info(\"----%s----\", str(tag))\n mem = psutil.virtual_memory()\n cls.logger.info(\"total:%s M\", mem.total / 1024 / 1024)\n cls.logger.info(\"available:%s M\", mem.available / 1024 / 1024)\n cls.logger.info(\"used:%s M\", mem.used / 1024 / 1024)\n cls.logger.info(\"free:%s M\", mem.free / 1024 / 1024)\n cls.logger.info(\"active:%s M\", mem.active / 1024 / 1024)\n cls.logger.info(\"inactive:%s M\", mem.inactive / 1024 / 1024)\n cls.logger.info(\"buffers:%s M\", mem.buffers / 1024 / 1024)\n cls.logger.info(\"cached:%s M\", mem.cached / 1024 / 1024)\n cls.logger.info(\"shared:%s M\", mem.shared / 1024 / 1024)", "def upload_crashes(self, name, directory):\n logging.info('Not uploading crashes because no Filestore.')", "def logs(lines=50):\n run(f'journalctl --lines {lines} --unit addok --follow')", "def log(self, message):\n try:\n stat = os.stat(self.logpath)\n if stat.st_size >= 1000000:\n os.rename(self.logpath, self.logpath + '.1')\n except:\n pass\n logfile = open(self.logpath, 'a+')\n logfile.write(message + \"\\n\")\n logfile.close()", "def logs(name, namespace, region, start_time, end_time, offset, failed, tail):\n\n start, end = _align_time(start_time, end_time, offset, tail)\n client = ScfLogClient(name, namespace, region, failed)\n client.fetch_log(start, end, tail)", "def objectLogger(params):\n mode = params['mode']\n maxlen = params['maxlen']\n file_format = params['format']\n home_dir = params['storage_dir']\n server = Listener((params['ip'], int(params['port'])))\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n while True:\n conn = server.accept()\n while True:\n try:\n data 
= conn.recv()\n except EOFError:\n break\n if data:\n storage.append(data)\n else:\n storage.sync(id_generator())\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n conn.close()", "def _load_disk(self):", "def _load_disk(self):", "def redire_spark_logs(bigdl_type=\"float\", log_path=os.getcwd() + \"/bigdl.log\"):\n callBigDlFunc(bigdl_type, \"redirectSparkLogs\", log_path)", "def get_device_log(self, client):\r\n file_path = client.getDeviceLog()\r\n logging.info(str(time.asctime(time.localtime())) + \" Device Logs collected !! \" + file_path)\r\n #self.logger.info(\"Device logs collected at %s\" % file_path)\r\n return file_path", "def logs(self, container: Container) -> str:", "def log_store(self) -> str:\n return pulumi.get(self, \"log_store\")", "def on_sync(self):\r\n self.log()", "def collect_storage_metrics(sys):\n try:\n session = get_session()\n client = InfluxDBClient(host=INFLUXDB_HOSTNAME, port=INFLUXDB_PORT, database=INFLUXDB_DATABASE)\n\n sys_id = sys[\"id\"]\n sys_name = get_system_name(sys)\n\n json_body = list()\n\n # Get Drive statistics\n drive_stats_list = session.get((\"{}/{}/analysed-drive-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n drive_locations = get_drive_location(sys_id, session)\n if CMD.showDriveNames:\n for stats in drive_stats_list:\n location_send = drive_locations.get(stats[\"diskId\"])\n LOG.info((\"Tray{:02.0f}, Slot{:03.0f}\").format(location_send[0], location_send[1]))\n # Add Drive statistics to json body\n for stats in drive_stats_list:\n disk_location_info = drive_locations.get(stats[\"diskId\"])\n disk_item = dict(\n measurement = \"disks\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n sys_tray = (\"{:02.0f}\").format(disk_location_info[0]),\n sys_tray_slot = (\"{:03.0f}\").format(disk_location_info[1])\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in DRIVE_PARAMS\n )\n )\n if CMD.showDriveMetrics:\n LOG.info(\"Drive payload: %s\", disk_item)\n json_body.append(disk_item)\n\n # Get interface statistics\n interface_stats_list = session.get((\"{}/{}/analysed-interface-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showInterfaceNames:\n for stats in interface_stats_list:\n LOG.info(stats[\"interfaceId\"])\n # Add interface statistics to json body\n for stats in interface_stats_list:\n if_item = dict(\n measurement = \"interface\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n interface_id = stats[\"interfaceId\"],\n channel_type = stats[\"channelType\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in INTERFACE_PARAMS\n )\n )\n if CMD.showInterfaceMetrics:\n LOG.info(\"Interface payload: %s\", if_item)\n json_body.append(if_item)\n\n # Get System statistics\n system_stats_list = session.get((\"{}/{}/analysed-system-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n # Add System statistics to json body\n sys_item = dict(\n measurement = \"systems\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name\n ),\n fields = dict(\n (metric, system_stats_list.get(metric)) for metric in SYSTEM_PARAMS\n )\n )\n if CMD.showSystemMetrics:\n LOG.info(\"System payload: %s\", sys_item)\n json_body.append(sys_item)\n \n # Get Volume statistics\n volume_stats_list = session.get((\"{}/{}/analysed-volume-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showVolumeNames:\n for stats in volume_stats_list:\n LOG.info(stats[\"volumeName\"])\n # Add Volume statistics to json body\n for stats in 
volume_stats_list:\n vol_item = dict(\n measurement = \"volumes\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n vol_name = stats[\"volumeName\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in VOLUME_PARAMS\n )\n )\n if CMD.showVolumeMetrics:\n LOG.info(\"Volume payload: %s\", vol_item)\n json_body.append(vol_item)\n\n if not CMD.doNotPost:\n client.write_points(json_body, database=INFLUXDB_DATABASE, time_precision=\"s\")\n\n except RuntimeError:\n LOG.error((\"Error when attempting to post statistics for {}/{}\").format(sys[\"name\"], sys[\"id\"]))", "def reset_used():\n with open(LOG_FILEPATH, 'w+') as logfile:\n pass", "def docker_logs():\n local('docker logs {} -f'.format(project_name))", "def test_slf_notastring():\n oldlogfile = get_logfile()\n start_logfile(1.0)\n start_logfile(True)\n set_logfile(oldlogfile)", "def set_disk_log_level(log_level):\r\n LogOptions._DISK_LOG_SCHEME, LogOptions._DISK_LOG_LEVEL = (\r\n LogOptions._parse_loglevel(log_level, scheme='google'))", "def test_slf_basic():\n oldlogfile = get_logfile()\n with tempfile.TemporaryDirectory() as tmp:\n logfile = os.path.join(tmp, 'log.txt')\n assert ~os.path.exists(logfile)\n start_logfile(logfile)\n assert os.path.exists(logfile)\n set_logfile(oldlogfile)", "def _performance_log(func):\n\n def wrapper(*arg):\n \"\"\" wrapper \"\"\"\n\n start = datetime.datetime.now()\n\n # Code execution\n res = func(*arg)\n\n if _log_performance:\n usage = resource.getrusage(resource.RUSAGE_SELF)\n memory_process = (usage[2])/1000\n\n delta = datetime.datetime.now() - start\n delta_milliseconds = int(delta.total_seconds() * 1000)\n\n _logger.info(\"PERFORMANCE - {0} - milliseconds |{1:>8,}| - memory MB |{2:>8,}|\"\n .format(func.__name__,\n delta_milliseconds,\n memory_process))\n\n return res\n\n return wrapper", "def log(length, file):\n\n if user_init.check_pre_init() and user_utility.check_drive_init() == 'True':\n \n data = user_utility.read_log(length, file)\n\n for log in data:\n print(log)\n\n\n else:\n user_utility.print_error(\"Sink folder not Found! 
Initialise folder first or reset your configuration!\")", "def logprllwrite(self):\n sql = '''select to_char(time_waited, 'FM99999999999999990') retvalue \n from v$system_event se, v$event_name en where se.event(+) \n = en.name and en.name = 'log files parallel write' '''\n self.cur.execute(sql)\n res = self.cur.fetchall()\n for i in res:\n print(i[0])", "def init_log():\n log_op = LogOp()\n log_op.config_log(logging.DEBUG)", "def logManager(self):\n time.sleep(0.1)\n while True:\n try:\n time.sleep(0.2)\n data = self.logQ.get(block=False)\n except Queue.Empty:\n pass\n else:\n try:\n self.log_lock.acquire() \n self.log_file.logEntry(data)\n time.sleep(0.1)\n self.log_lock.release()\n except:\n print '*Unable to write to log file*'", "def _load_disk(self):\r\n pass", "def startLogFiles():\n #global techLog\n global userl\n _generic_startLogFiles(True)", "def log_free_disk_space():\n cmd = 'df -h'\n p = Popen(cmd, shell=True, stdout=PIPE)\n res = p.communicate()\n if res[0]:\n res = res[0]\n else:\n res = res[1]\n logger.warning('Disk usage statisticks:')\n logger.warning(res)", "def test02StoreRefresh(self):\n s = utils.FastStore(max_size=5)\n keys = []\n for i in range(0, 5):\n keys.append(s.Put(i, i))\n\n # This should not raise because keys[0] should be refreshed each time its\n # gotten\n for i in range(0, 1000):\n s.Get(keys[0])\n s.Put(i, i)", "def log_time(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n start = time.time()\n result = func(*args, **kwargs)\n end = time.time()\n f = open(\"log_dev.txt\",'a',encoding=\"utf8\")\n time_res = end - start\n f.write(\"\\n\"+func.__name__+ \" time = \" + str(time_res))\n return result\n\n return wrapper", "def checkLogs():\n run(\"cat /etc/chariot/logs\")#not sure if this is cat-able", "def store(**kwargs):\r\n stored = AppLog(**kwargs)\r\n DBSession.add(stored)", "def test_compute_persistence(perfectModelEnsemble_initialized_control):\n perfectModelEnsemble_initialized_control._compute_persistence(metric=\"acc\")", "def collect_logs(self, log_dir, label=None, min_t=100, max_t=200):\n pass", "def test_backup_with_update_on_disk_of_snapshot_markers(self):\n version = RestConnection(self.backupset.backup_host).get_nodes_version()\n if version[:5] == \"6.5.0\":\n self.log.info(\"\\n\\n******* Due to issue in MB-36904, \\\n \\nthis test will be skipped in 6.5.0 ********\\n\")\n return\n gen1 = BlobGenerator(\"ent-backup1\", \"ent-backup-\", self.value_size, end=100000)\n gen2 = BlobGenerator(\"ent-backup2\", \"ent-backup-\", self.value_size, end=100000)\n gen3 = BlobGenerator(\"ent-backup3\", \"ent-backup-\", self.value_size, end=100000)\n rest_conn = RestConnection(self.backupset.cluster_host)\n rest_conn.create_bucket(bucket=\"bucket0\", ramQuotaMB=1024)\n self.buckets = rest_conn.get_buckets()\n authentication = \"-u Administrator -p password\"\n\n self._load_all_buckets(self.master, gen1, \"create\", 0)\n self.log.info(\"Stop persistent\")\n cluster_nodes = rest_conn.get_nodes()\n clusters = copy.deepcopy(cluster_nodes)\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n for node in clusters:\n shell.execute_command(\"%scbepctl%s %s:11210 -b %s stop %s\" % \\\n (self.cli_command_location,\n self.cmd_ext,\n node.ip,\n \"bucket0\",\n authentication))\n shell.disconnect()\n self.log.info(\"Load 2nd batch docs\")\n self._load_all_buckets(self.master, gen2, \"create\", 0)\n self.log.info(\"Run full backup with cbbackupwrapper\")\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n backup_dir = 
self.tmp_path + \"backup\" + self.master.ip\n shell.execute_command(\"rm -rf %s\" % backup_dir)\n shell.execute_command(\"mkdir %s\" % backup_dir)\n shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m full %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n self.log.info(\"Load 3rd batch docs\")\n self._load_all_buckets(self.master, gen3, \"create\", 0)\n self.log.info(\"Run diff backup with cbbackupwrapper\")\n output, _ = shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m diff %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n\n if output and \"SUCCESSFULLY COMPLETED\" not in output[1]:\n self.fail(\"Failed to backup as the fix in MB-25727\")\n shell.disconnect()", "def test_errors_on_log(self):\n mb = self.maria_backup\n self.assertFalse(mb.errors_on_log()) # at the moment, xtrabackup execution does not generate disk logs", "def add_log(self, log):\n log = str(datetime.datetime.now()) + \": \"+log+\"\\n\"\n print(log)\n self.logs.append(log)\n if len(self.logs) > 10:\n self.append_to_logfile()", "def log_scalar(name, value, step, autolog):\n if not autolog:\n mlflow.log_metric(name, value)", "def recordLogsToList(log):\n print log\n# global LOGLIST\n LOGLIST.append(log)", "def on_L1(self):\r\n self.log()", "def on_L2(self):\r\n self.log()", "def log_all(self):\n self.save_raw()\n self.log()", "def logs_directory(self):", "def test_log_links(self):\n self.host = synthetic_host(\"myserver-with-nids\", [Nid.Nid(\"192.168.0.1\", \"tcp\", 0)])\n self.create_simple_filesystem(self.host)\n fake_log_message(\"192.168.0.1@tcp testfs-MDT0000\")\n response = self.api_client.get(\"/api/log/\")\n (event,) = self.deserialize(response)[\"objects\"]\n self.assertEqual(len(event[\"substitutions\"]), 2)\n self.host.state = \"removed\"\n self.host.save()\n self.mdt.not_deleted = False\n self.mdt.save()\n response = self.api_client.get(\"/api/log/\")\n (event,) = self.deserialize(response)[\"objects\"]\n self.assertEqual(len(event[\"substitutions\"]), 0)", "def log(string):\n\n print string\n\n# data and time\n dt = datetime.now().strftime(\"%b %d %H:%M:%S\")\n\n# check if log file exist / if not create it\n check_logf = os.path.isfile(logfile)\n if check_logf == False:\n os.system(\"touch %s\" % (logfile))\n firstlog = \"%s %s jadm: jadm log file was created!\" % (dt, os.uname()[1])\n os.system(\"echo '%s' > %s\" % (firstlog, logfile))\n\n# applay string to log file\n string = \"%s %s jadm: %s\" % (dt, os.uname()[1], string)\n os.system(\"echo '%s' >> %s\" % (string, logfile))", "def test_data_store_local_index(self, tcex: TcEx):\n tcex.api.tc.v2.datastore('local', self.data_type)", "def createLogicalVolume(self, vg, filesystem, name, mountpoint, size):\n lv = {}\n lv['command'] = 'create:logvol'\n lv['vg'] = vg\n lv['fs'] = filesystem\n lv['size'] = size - EXTENT_SIZE + 1\n lv['name'] = name\n lv['mountPoint'] = mountpoint\n lv['format'] = 'yes'\n\n return lv", "def test_cbbackupmgr_collect_logs(self):\n if \"5.5\" > self.cb_version[:3]:\n self.fail(\"This test is only for cb version 5.5 and later. 
\")\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster()\n self._collect_logs()", "def disk_log_scheme():\r\n if LogOptions._DISK_LOG_SCHEME is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_SCHEME", "def log_runtime(label, mean_time, std, instances):\n pass", "def do_disk_event(client, args):\n args.type = 'disk'\n do_event_show(client, args)", "def show_bigdl_info_logs(bigdl_type=\"float\"):\n callBigDlFunc(bigdl_type, \"showBigDlInfoLogs\")", "def logs(self, **options):\n cmd = self.get_scalingo_prefix_cmd()\n cmd += \" logs -f\"\n for name, val in options.items():\n if len(name) == 1:\n cmd += f\" -{name}\"\n else:\n cmd += f\" --{name}\"\n if val:\n cmd += f\" {val}\"\n if self.env_name == \"local\":\n raise ValueError(\"No logs on local environment\")\n else:\n asyncio.run(self.async_run(cmd))", "def store_data(batch_df: DataFrame, output_delta_lake_path):\n batch_df.select(col(\"MeterId\"),\n col(\"SupplierId\"),\n col(\"Measurement\"),\n col(\"ObservationTime\")) \\\n .repartition(\"SupplierId\") \\\n .write \\\n .partitionBy(\"SupplierId\") \\\n .format(\"delta\") \\\n .mode(\"append\") \\\n .save(output_delta_lake_path)", "def logentry(jobid, label, typ, content=None, path=None):\n ud = str(uuid.uuid4())\n db.logs.save({\"uuid\":ud, \"jobid\":jobid, \"label\":label, \"type\":typ, \"content\":content, \"date\":tstamp(), \"containerurl\":path})", "def load(lb_id, backend='memory'):\n return b_api.fetch(backend).logbook_get(lb_id)", "def dump_to_log(self):\n # self._send_request(\"/dumpToLog\")\n pass", "def store(status, message, **kwargs):\r\n LogRecord(status, message, **kwargs)", "def log_mounts_info(mounts):\n if isinstance(mounts, str):\n mounts = [mounts]\n\n g.log.info(\"Start logging mounts information:\")\n for mount_obj in mounts:\n g.log.info(\"Information of mount %s:%s\", mount_obj.client_system,\n mount_obj.mountpoint)\n # Mount Info\n g.log.info(\"Look For Mountpoint:\\n\")\n cmd = \"mount | grep %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Disk Space Usage\n g.log.info(\"Disk Space Usage Of Mountpoint:\\n\")\n cmd = \"df -h %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Long list the mountpoint\n g.log.info(\"List Mountpoint Entries:\\n\")\n cmd = \"ls -ld %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Stat mountpoint\n g.log.info(\"Mountpoint Status:\\n\")\n cmd = \"stat %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)", "def putlog(self,s):\n if not self.logqueue == None:\n# print s\n self.logqueue.put(\"Spectrum: (\"+time.ctime()+\"):\\n\"+s)", "def set_size_based_rotating_log(log_path=LOGGER_PATH, log_name=LOGGER_FILENAME):\n\n # Checks if the path exists otherwise tries to create it\n if not os.path.exists(log_path):\n try:\n os.makedirs(log_path)\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n print(exc_type, exc_value, exc_traceback)\n\n # Sets the format and level of logging records\n format = '%(asctime)s - %(levelname)s - [%(pathname)s:%(lineno)s - %(funcName)10s() ] - %(message)s'\n formatter = Formatter(format)\n level=logging.DEBUG\n\n # Does basic configuration for the logging system\n logging.basicConfig(format=format, level=level)\n\n # Instantiates a logger object\n logger 
= logging.getLogger(\"\")\n\n handler = logging.handlers.RotatingFileHandler(filename=log_path+'/'+log_name,\n maxBytes=LOGGER_MAX_SIZE,\n backupCount=LOGGER_MAX_FILES)\n handler.setFormatter(formatter)\n handler.rotator = rotator\n handler.namer = namer\n logger.addHandler(handler)\n return logger", "def test_logging_log_rotate_for_mysql(\n self,\n admin_node,\n k8s_actions,\n show_step,\n os_deployed):\n logger.info('Log rotate for mysql')\n log_path = '/var/log/ccp/mysql/'\n log_file = 'mysql.log'\n\n show_step(1)\n # get cron pod\n cron_pod = [pod for pod in k8s_actions.api.pods.list(\n namespace=ext.Namespace.BASE_NAMESPACE)\n if 'cron-' in pod.name][0]\n # clean files\n utils.rm_files(admin_node, cron_pod, log_path + log_file + '*')\n\n show_step(2)\n for day in range(0, 8):\n utils.create_file(admin_node, cron_pod, log_path + log_file, 110)\n utils.run_daily_cron(admin_node, cron_pod, 'logrotate')\n sleep(5)\n\n show_step(3)\n log_files = utils.list_files(\n admin_node, cron_pod, log_path, log_file + '*')\n assert len(log_files) == 7,\\\n \"Count of log files after rotation is wrong. \" \\\n \"Expected {} Actual {}\".format(log_files, 7)", "def log(cmdDict, kind, error=None):\n\n cmdDict['timestamp'] = str(int(time.time() * 1000))\n cmdDict['logType'] = kind\n cmdDict['_id'] = cmdDict['transactionNumber'] + cmdDict['user'] + cmdDict['command'] + cmdDict['timestamp'] + cmdDict['logType']\n\n if (error != None):\n cmdDict['errorMessage'] = error\n\n try:\n Database.insert(TRANSACT_COLLECT, cmdDict)\n\n except pymongo.errors.PyMongoError as err:\n print(f\"ERROR! Could not log command into database. Failed with: {err}\")", "def upload_logs():\n return {\n 'page': 'upload_logs',\n 'raw_logs': '',\n }", "def logMemoryStats():\n class MemoryStatusEx(ctypes.Structure):\n \"\"\" MEMORYSTATUSEX \"\"\"\n kaFields = [\n ( 'dwLength', ctypes.c_ulong ),\n ( 'dwMemoryLoad', ctypes.c_ulong ),\n ( 'ullTotalPhys', ctypes.c_ulonglong ),\n ( 'ullAvailPhys', ctypes.c_ulonglong ),\n ( 'ullTotalPageFile', ctypes.c_ulonglong ),\n ( 'ullAvailPageFile', ctypes.c_ulonglong ),\n ( 'ullTotalVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailExtendedVirtual', ctypes.c_ulonglong ),\n ];\n _fields_ = kaFields; # pylint: disable=invalid-name\n\n def __init__(self):\n super(MemoryStatusEx, self).__init__();\n self.dwLength = ctypes.sizeof(self);\n\n try:\n oStats = MemoryStatusEx();\n ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(oStats));\n except:\n reporter.logXcpt();\n return False;\n\n reporter.log('Memory statistics:');\n for sField, _ in MemoryStatusEx.kaFields:\n reporter.log(' %32s: %s' % (sField, getattr(oStats, sField)));\n return True;", "def get_dismount_mount_record(r, start_time):\n retv = {'state': 'M',\n 'node': r['node'],\n 'volume': r['volume'],\n 'type': r['type'],\n 'logname': r['logname'],\n 'start': start_time,\n 'finish': timeutil.wind_time(start_time, seconds=20, backward=False),\n 'storage_group': 'PROBE_UTILITY',\n 'reads': 0,\n 'writes':0\n }\n return retv", "def azure_fetch_and_push_to_influx(token, azure_data_store, file_path, tag_map, influx_settings):\n df = read_df_from_azure(azure_data_store, file_path, token)\n (host, port, user, password, db_name, batch_size) = influx_settings\n #df=df.resample('1Min').mean()\n write_to_influx(df, tag_map, host, port, user, password, db_name, batch_size)", "def test_metering_database(self):\n self.with_deletion = self.input.param(\"delete\", False)\n self.db_name = \"%s-testmetering\" % 
self.db_name\n # validate initial throughput is 5000/3 = 1666\n for bucket in self.cluster.buckets:\n print(bucket.servers)\n self.assertEqual(self.bucket_util.get_throttle_limit(bucket),\n self.bucket_throttling_limit)\n\n # validate create, update, delete stat\n for op_type in [\"create\", \"update\"]:\n if op_type == \"create\":\n self.load_data(create_start=0, create_end=self.num_items, create_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if op_type == \"update\":\n self.load_data(update_start=0, update_end=self.num_items, update_perc=100, mutated=1)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if self.with_deletion:\n self.log.info(\"performing delete operation\")\n self.load_data(delete_start=0, delete_end=self.num_items, delete_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)", "def test_data_store_local_new_index(tcex: TcEx):\n data = {'one': 1}\n key = str(uuid.uuid4())\n rid = 'one'\n\n ds = tcex.api.tc.v2.datastore('local', key)\n\n results = ds.add(rid=rid, data=data)\n if results is None:\n assert False, 'datastore add returned None'\n assert results.get('_id') == rid\n assert results.get('_shards', {}).get('successful') == 1", "def send_logs(self):\n for i in range(30):\n with open('{}-{}.log'.format(self._log_file_path, i), 'a') as log_file:\n for _ in range(self._log_rate):\n log_file.write(self._log_record + '\\n')", "def on_L3(self):\r\n self.log()", "def fetch_data_logger(fn):\n @functools.wraps(fn)\n def wrapper():\n\n data = fn()\n\n create_or_update_log(data, LOG_TYPES['D'])\n\n return data\n return wrapper", "def list_log_files(fs, devices, start_times, verbose=True, passwords={}):\n import canedge_browser\n\n log_files = []\n\n if len(start_times):\n for idx, device in enumerate(devices):\n start = start_times[idx]\n log_files_device = canedge_browser.get_log_files(fs, [device], start_date=start, passwords=passwords)\n log_files.extend(log_files_device)\n\n if verbose:\n print(f\"Found {len(log_files)} log files\\n\")\n\n return log_files", "def main():\n global tar_file_descr\n\n help_msg = 'Usage: log_collector.py <all | host1[,host2,host3...]>'\n hosts = []\n if len(sys.argv) == 2:\n if '-h' == sys.argv[1] or '--help' == sys.argv[1]:\n print(help_msg)\n sys.exit(0)\n elif 'all' == sys.argv[1]:\n # get logs from all hosts\n hosts = []\n host_objs = CLIENT.host_get_all()\n for host_obj in host_objs:\n hosts.append(host_obj.name)\n else:\n # get logs from specified hosts\n hostnames = sys.argv[1].split(',')\n for host in hostnames:\n if host not in hosts:\n hosts.append(host)\n else:\n print(help_msg)\n sys.exit(1)\n\n # open tar file for storing logs\n fd, tar_path = tempfile.mkstemp(prefix='kolla_support_logs_',\n suffix='.tgz')\n os.close(fd) # avoid fd leak\n\n with tarfile.open(tar_path, 'w:gz') as tar_file_descr:\n # clear out old logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n os.mkdir(LOGDIR)\n\n # gather logs from selected hosts\n try:\n for host in hosts:\n get_logs_from_host(host)\n\n # tar up all the container logs\n tar_file_descr.add(LOGDIR, arcname='container_logs')\n finally:\n # remove uncompressed logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n\n # gather dump output from kolla-cli\n dump_kolla_info()\n\n print('Log collection complete. 
Logs are at %s' % tar_path)", "def log_store(self) -> Optional[str]:\n return pulumi.get(self, \"log_store\")", "def __enable_log_streaming():\n # Enable BigQuery service for the project:\n run_command('gcloud services enable bigquery --project {}'.format(PROJECT_ID))\n\n # Create the BigQuery dataset to store the logs:\n # For details refer to https://cloud.google.com/bigquery/docs/datasets#bigquery-create-dataset-cli\n run_command('bq mk --data_location {} --description \\\"Cloud logging export.\\\" {}'\n .format(LOGS_LOCATION, LOGS_SINK_DATASET_ID), 'already exists')\n\n # Set up a log sink to the above-created BigQuery dataset:\n output_message = run_command('gcloud logging sinks create {} {} --project {} \\\n --log-filter=\\'resource.type=\\\"gcs_bucket\\\" OR resource.type=\\\"project\\\"\\''\n .format(LOGS_SINK_NAME, LOGS_SINK_DESTINATION, PROJECT_ID))\n\n # The service account that will be writing to BQ dataset is listed by the previous command.\n # After extracting the service account from the message, you need to give it BQ Writer role to that service account.\n __set_dataset_access( __find_service_account_in_message(output_message) )", "def get_storage_info(self, address: str, level: StorageLevel):", "def local(self, text):\n\t\tlogf = open(\"update_log.txt\", \"a\")\n\t\tdate = datetime.datetime.now()\n\t\tlogf.writelines(\"[\" + str(date) + \"] \" + text + \"\\n\")\n\t\tlogf.close()", "def log_it(logdata=None):\n with open(\"bloomhack.log\", \"a\") as fp:\n fp.write(logdata)\n return", "def node_storage(self, node):\n def listdir(path, only_dirs=False):\n ents = node.account.sftp_client.listdir(path)\n if not only_dirs:\n return ents\n paths = map(lambda fn: (fn, os.path.join(path, fn)), ents)\n return [p[0] for p in paths if node.account.isdir(p[1])]\n\n store = NodeStorage(RedpandaService.DATA_DIR)\n for ns in listdir(store.data_dir, True):\n if ns == '.coprocessor_offset_checkpoints':\n continue\n ns = store.add_namespace(ns, os.path.join(store.data_dir, ns))\n for topic in listdir(ns.path):\n topic = ns.add_topic(topic, os.path.join(ns.path, topic))\n for num in listdir(topic.path):\n partition = topic.add_partition(\n num, node, os.path.join(topic.path, num))\n partition.add_files(listdir(partition.path))\n return store", "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))", "def snapshot(snapshot_type, result_q, time_delta):", "def _log_process(ip_or_url,\n username,\n password,\n database,\n series_name,\n port=8086,\n polling_interval=1,\n retention_duration=MEASUREMENTS_RETENTION_DURATION,\n **tags):\n logger = _logger()\n logger.info('Logging GPU to Database {}'.format(ip_or_url))\n\n kwargs = {'series_name': series_name,\n 'polling_interval': polling_interval,\n 'retention_duration': retention_duration}\n kwargs.update(tags)\n p = Process(target=_start_logger_process,\n args=(ip_or_url,\n port,\n username,\n password,\n database),\n kwargs=kwargs)\n p.start()\n yield p\n p.terminate()\n p.join()", "def test_start_new_log(self):\n old_log_file_name = self.device.log_file_name\n 
self.device.start_new_log(log_name_prefix=self.get_log_suffix())\n self.assertNotEqual(\n old_log_file_name, self.device.log_file_name,\n f\"Expected log file name to change from {old_log_file_name}\")\n self.assertTrue(\n os.path.exists(old_log_file_name),\n f\"Expected old log file name {old_log_file_name} to exist\")\n self.assertTrue(\n os.path.exists(self.device.log_file_name),\n f\"Expected new log file name {self.device.log_file_name} to exist\")" ]
[ "0.6249917", "0.600348", "0.5359398", "0.5338779", "0.5310546", "0.52928746", "0.5282921", "0.5279352", "0.5279001", "0.52591884", "0.521372", "0.52056307", "0.5136519", "0.5106076", "0.5093585", "0.5093585", "0.508893", "0.50791097", "0.507007", "0.5068071", "0.50492823", "0.5047607", "0.5041982", "0.5033806", "0.50300545", "0.50300545", "0.50220966", "0.50161654", "0.49987411", "0.4973591", "0.49686015", "0.4965938", "0.49597555", "0.4947962", "0.4918784", "0.4917084", "0.49052343", "0.49005905", "0.48973042", "0.48950753", "0.48905793", "0.48889422", "0.48816362", "0.48815823", "0.48786694", "0.48686954", "0.4864698", "0.48646566", "0.4858941", "0.48564273", "0.485455", "0.4843747", "0.48412818", "0.48370776", "0.48358908", "0.4833253", "0.48229158", "0.4806377", "0.4805858", "0.48033783", "0.48021865", "0.47964823", "0.47750932", "0.4768405", "0.4758163", "0.4757573", "0.47427362", "0.4735837", "0.4733725", "0.47297028", "0.47232515", "0.47209674", "0.47128278", "0.47117206", "0.47072574", "0.47022194", "0.46999288", "0.46977976", "0.46937203", "0.46901488", "0.4685285", "0.4685246", "0.46755475", "0.4670844", "0.46674314", "0.46657613", "0.4653522", "0.46523577", "0.46428913", "0.46363285", "0.46357659", "0.46331036", "0.46296275", "0.46278575", "0.4626488", "0.46094543", "0.45972398", "0.45966548", "0.45935205", "0.459188", "0.4590052" ]
0.0
-1
This operation is applicable only to general-purpose local-disk and dedicated local-disk instances. You can call this operation up to 30 times per minute. To call this operation at a higher frequency, use a Logstore. For more information, see [Manage a Logstore](~~48990~~).
def describe_error_log_records( self, request: dds_20151201_models.DescribeErrorLogRecordsRequest, ) -> dds_20151201_models.DescribeErrorLogRecordsResponse: runtime = util_models.RuntimeOptions() return self.describe_error_log_records_with_options(request, runtime)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def monitorStore():\n # commented to use psutil system info system_info = systeminfo.get_all_info()\n\n system_info = nodeinfo.node_all()\n system_info ['monitored_timestamp'] = config.get_current_system_timestamp()\n\n # Attach sliver info to system info\n system_info.update(sliverinfo.collectAllDataAPI())\n\n s = shelve.open('log_shelf.db', writeback = True)\n\n while(1):\n try:\n try:\n if s.has_key('current_seq_number'):\n #Update current sequence number\n s['current_seq_number']+= 1\n current_seq = s['current_seq_number']\n else:\n current_seq = 1\n s['current_seq_number']= current_seq\n\n print(\"writing to file: \" + str(current_seq))\n\n # print(\"writing to file\" + str(system_info))\n s[str(current_seq)]= system_info\n\n\n finally:\n s.close()\n break\n\n except OSError:\n # In some research devices, the underlying dbm has a bug which needs to be handled explicitly\n print(\"Exception caught while handling shelve file!! OS Error: file not found. Trying again in 1 second\")\n time.sleep(1)\n continue\n\n delete_seen_entries()", "def log(msg):\n\n print('datastore: %s' % msg)", "def disable_disk_logging():\r\n app.set_option(_DISK_LOG_LEVEL_OPTION, LogOptions._LOG_LEVEL_NONE_KEY, force=True)", "def store_stream(data: DataStream):\n if data:\n try:\n c1 = datetime.now()\n CC.save_datastream(data,\"datastream\")\n e1 = datetime.now()\n CC.save_datastream_to_influxdb(data)\n i1 = datetime.now()\n print(\"Cassandra Time: \", e1-c1, \" Influx Time: \",i1-e1, \" Batch size: \",len(data.data))\n except:\n cc_log()", "def test_Drive_log_count(opencl_env: cldrive_env.OpenCLEnvironment):\n logs = opencl_kernel_driver.Drive(\n \"\"\"\nkernel void A(global int* a, global int* b) {\n a[get_global_id(0)] += b[get_global_id(0)];\n}\n\"\"\",\n 16,\n 16,\n opencl_env,\n 5,\n )\n assert len(logs) == 5", "def disk():\n run(env.disk_usage_command % env)", "def record_used(kind, hash):\n if os.path.exists(LOG_FILEPATH):\n log = open(os.path.join(ROOT, 'used'), 'a')\n else:\n log = open(os.path.join(ROOT, 'used'), 'w+')\n\n log.writelines([\"%s...%s\\n\" % (kind, hash)])", "def syslog_local_file(handle, admin_state, name, severity=\"emergencies\",\n size=\"40000\"):\n\n from ucsmsdk.mometa.comm.CommSyslogFile import CommSyslogFile\n\n mo = CommSyslogFile(parent_mo_or_dn=\"sys/svc-ext/syslog\", size=size,\n admin_state=admin_state,\n name=name,\n severity=severity)\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n return mo", "def check_oplog(con, warning, critical, perf_data):\n warning = warning or 24 \n critical = critical or 4\n try:\n db = con.local\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) \n if (db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) != None) :\n oplog = \"oplog.rs\";\n else :\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"})\n if (db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"}) != None) :\n oplog = \"oplog.$main\";\n else :\n message = \"neither master/slave nor replica set replication detected\";\n return check_levels(None,warning,critical,message)\n\n try:\n set_read_preference(con.admin)\n data=con.local.command(pymongo.son_manipulator.SON([('collstats',oplog)]))\n except:\n data = con.admin.command(son.SON([('collstats',oplog)]))\n\n ol_size=data['size']\n ol_storage_size=data['storageSize']\n ol_used_storage=int(float(ol_size)/ol_storage_size*100+1)\n ol=con.local[oplog]\n firstc = ol.find().sort(\"$natural\",pymongo.ASCENDING).limit(1)[0]['ts']\n lastc = 
ol.find().sort(\"$natural\",pymongo.DESCENDING).limit(1)[0]['ts']\n time_in_oplog= (lastc.as_datetime()-firstc.as_datetime())\n message=\"Oplog saves \"+ str(time_in_oplog) + \" %d%% used\" %ol_used_storage \n try: #work starting from python2.7\n hours_in_oplog= time_in_oplog.total_seconds()/60/60\n except:\n hours_in_oplog= float(time_in_oplog.seconds + time_in_oplog.days * 24 * 3600)/60/60\n approx_level=hours_in_oplog*100/ol_used_storage\n message+=performance_data(perf_data,[(\"%.2f\" % hours_in_oplog,'oplog_time',warning,critical),(\"%.2f \" % approx_level, 'oplog_time_100_percent_used')])\n return check_levels(-approx_level,-warning,-critical,message)\n\n except Exception, e:\n return exit_with_general_critical(e)", "def disk_log_level():\r\n if LogOptions._DISK_LOG_LEVEL is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_LEVEL", "def get_full_log(self):\n return self._get_log('full')", "def _record_storage_event(metric, value=0):\n command_name = properties.VALUES.metrics.command_name.Get()\n metrics.CustomKeyValue(command_name, 'Storage-' + metric, value)", "def get_logdb_quota():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<show><system><logdb-quota></logdb-quota></system></show>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def test_memory_pressure(self):\n self.execute_query(self.query)\n # This triggers a full GC as of openjdk 1.8.\n call([\"jmap\", \"-histo:live\", str(self.cluster.catalogd.get_pid())])\n # Sleep for logbufsecs=5 seconds to wait for the log to be flushed. Wait 5 more\n # seconds to reduce flakiness.\n time.sleep(10)\n assert self.metadata_cache_string not in self._get_catalog_object()", "def getLogs():", "def getLogs():", "def InsertLog():", "def GetLogs(self):\n raise NotImplementedError()", "def logmem(cls, tag):\n\n cls.logger.info(\"----%s----\", str(tag))\n mem = psutil.virtual_memory()\n cls.logger.info(\"total:%s M\", mem.total / 1024 / 1024)\n cls.logger.info(\"available:%s M\", mem.available / 1024 / 1024)\n cls.logger.info(\"used:%s M\", mem.used / 1024 / 1024)\n cls.logger.info(\"free:%s M\", mem.free / 1024 / 1024)\n cls.logger.info(\"active:%s M\", mem.active / 1024 / 1024)\n cls.logger.info(\"inactive:%s M\", mem.inactive / 1024 / 1024)\n cls.logger.info(\"buffers:%s M\", mem.buffers / 1024 / 1024)\n cls.logger.info(\"cached:%s M\", mem.cached / 1024 / 1024)\n cls.logger.info(\"shared:%s M\", mem.shared / 1024 / 1024)", "def upload_crashes(self, name, directory):\n logging.info('Not uploading crashes because no Filestore.')", "def logs(lines=50):\n run(f'journalctl --lines {lines} --unit addok --follow')", "def log(self, message):\n try:\n stat = os.stat(self.logpath)\n if stat.st_size >= 1000000:\n os.rename(self.logpath, self.logpath + '.1')\n except:\n pass\n logfile = open(self.logpath, 'a+')\n logfile.write(message + \"\\n\")\n logfile.close()", "def logs(name, namespace, region, start_time, end_time, offset, failed, tail):\n\n start, end = _align_time(start_time, end_time, offset, tail)\n client = ScfLogClient(name, namespace, region, failed)\n client.fetch_log(start, end, tail)", "def objectLogger(params):\n mode = params['mode']\n maxlen = params['maxlen']\n file_format = params['format']\n home_dir = params['storage_dir']\n server = Listener((params['ip'], int(params['port'])))\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n while True:\n conn = server.accept()\n while True:\n try:\n data 
= conn.recv()\n except EOFError:\n break\n if data:\n storage.append(data)\n else:\n storage.sync(id_generator())\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n conn.close()", "def _load_disk(self):", "def _load_disk(self):", "def redire_spark_logs(bigdl_type=\"float\", log_path=os.getcwd() + \"/bigdl.log\"):\n callBigDlFunc(bigdl_type, \"redirectSparkLogs\", log_path)", "def get_device_log(self, client):\r\n file_path = client.getDeviceLog()\r\n logging.info(str(time.asctime(time.localtime())) + \" Device Logs collected !! \" + file_path)\r\n #self.logger.info(\"Device logs collected at %s\" % file_path)\r\n return file_path", "def logs(self, container: Container) -> str:", "def log_store(self) -> str:\n return pulumi.get(self, \"log_store\")", "def on_sync(self):\r\n self.log()", "def collect_storage_metrics(sys):\n try:\n session = get_session()\n client = InfluxDBClient(host=INFLUXDB_HOSTNAME, port=INFLUXDB_PORT, database=INFLUXDB_DATABASE)\n\n sys_id = sys[\"id\"]\n sys_name = get_system_name(sys)\n\n json_body = list()\n\n # Get Drive statistics\n drive_stats_list = session.get((\"{}/{}/analysed-drive-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n drive_locations = get_drive_location(sys_id, session)\n if CMD.showDriveNames:\n for stats in drive_stats_list:\n location_send = drive_locations.get(stats[\"diskId\"])\n LOG.info((\"Tray{:02.0f}, Slot{:03.0f}\").format(location_send[0], location_send[1]))\n # Add Drive statistics to json body\n for stats in drive_stats_list:\n disk_location_info = drive_locations.get(stats[\"diskId\"])\n disk_item = dict(\n measurement = \"disks\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n sys_tray = (\"{:02.0f}\").format(disk_location_info[0]),\n sys_tray_slot = (\"{:03.0f}\").format(disk_location_info[1])\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in DRIVE_PARAMS\n )\n )\n if CMD.showDriveMetrics:\n LOG.info(\"Drive payload: %s\", disk_item)\n json_body.append(disk_item)\n\n # Get interface statistics\n interface_stats_list = session.get((\"{}/{}/analysed-interface-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showInterfaceNames:\n for stats in interface_stats_list:\n LOG.info(stats[\"interfaceId\"])\n # Add interface statistics to json body\n for stats in interface_stats_list:\n if_item = dict(\n measurement = \"interface\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n interface_id = stats[\"interfaceId\"],\n channel_type = stats[\"channelType\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in INTERFACE_PARAMS\n )\n )\n if CMD.showInterfaceMetrics:\n LOG.info(\"Interface payload: %s\", if_item)\n json_body.append(if_item)\n\n # Get System statistics\n system_stats_list = session.get((\"{}/{}/analysed-system-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n # Add System statistics to json body\n sys_item = dict(\n measurement = \"systems\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name\n ),\n fields = dict(\n (metric, system_stats_list.get(metric)) for metric in SYSTEM_PARAMS\n )\n )\n if CMD.showSystemMetrics:\n LOG.info(\"System payload: %s\", sys_item)\n json_body.append(sys_item)\n \n # Get Volume statistics\n volume_stats_list = session.get((\"{}/{}/analysed-volume-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showVolumeNames:\n for stats in volume_stats_list:\n LOG.info(stats[\"volumeName\"])\n # Add Volume statistics to json body\n for stats in 
volume_stats_list:\n vol_item = dict(\n measurement = \"volumes\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n vol_name = stats[\"volumeName\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in VOLUME_PARAMS\n )\n )\n if CMD.showVolumeMetrics:\n LOG.info(\"Volume payload: %s\", vol_item)\n json_body.append(vol_item)\n\n if not CMD.doNotPost:\n client.write_points(json_body, database=INFLUXDB_DATABASE, time_precision=\"s\")\n\n except RuntimeError:\n LOG.error((\"Error when attempting to post statistics for {}/{}\").format(sys[\"name\"], sys[\"id\"]))", "def reset_used():\n with open(LOG_FILEPATH, 'w+') as logfile:\n pass", "def docker_logs():\n local('docker logs {} -f'.format(project_name))", "def test_slf_notastring():\n oldlogfile = get_logfile()\n start_logfile(1.0)\n start_logfile(True)\n set_logfile(oldlogfile)", "def set_disk_log_level(log_level):\r\n LogOptions._DISK_LOG_SCHEME, LogOptions._DISK_LOG_LEVEL = (\r\n LogOptions._parse_loglevel(log_level, scheme='google'))", "def test_slf_basic():\n oldlogfile = get_logfile()\n with tempfile.TemporaryDirectory() as tmp:\n logfile = os.path.join(tmp, 'log.txt')\n assert ~os.path.exists(logfile)\n start_logfile(logfile)\n assert os.path.exists(logfile)\n set_logfile(oldlogfile)", "def _performance_log(func):\n\n def wrapper(*arg):\n \"\"\" wrapper \"\"\"\n\n start = datetime.datetime.now()\n\n # Code execution\n res = func(*arg)\n\n if _log_performance:\n usage = resource.getrusage(resource.RUSAGE_SELF)\n memory_process = (usage[2])/1000\n\n delta = datetime.datetime.now() - start\n delta_milliseconds = int(delta.total_seconds() * 1000)\n\n _logger.info(\"PERFORMANCE - {0} - milliseconds |{1:>8,}| - memory MB |{2:>8,}|\"\n .format(func.__name__,\n delta_milliseconds,\n memory_process))\n\n return res\n\n return wrapper", "def log(length, file):\n\n if user_init.check_pre_init() and user_utility.check_drive_init() == 'True':\n \n data = user_utility.read_log(length, file)\n\n for log in data:\n print(log)\n\n\n else:\n user_utility.print_error(\"Sink folder not Found! 
Initialise folder first or reset your configuration!\")", "def logprllwrite(self):\n sql = '''select to_char(time_waited, 'FM99999999999999990') retvalue \n from v$system_event se, v$event_name en where se.event(+) \n = en.name and en.name = 'log files parallel write' '''\n self.cur.execute(sql)\n res = self.cur.fetchall()\n for i in res:\n print(i[0])", "def init_log():\n log_op = LogOp()\n log_op.config_log(logging.DEBUG)", "def logManager(self):\n time.sleep(0.1)\n while True:\n try:\n time.sleep(0.2)\n data = self.logQ.get(block=False)\n except Queue.Empty:\n pass\n else:\n try:\n self.log_lock.acquire() \n self.log_file.logEntry(data)\n time.sleep(0.1)\n self.log_lock.release()\n except:\n print '*Unable to write to log file*'", "def startLogFiles():\n #global techLog\n global userl\n _generic_startLogFiles(True)", "def _load_disk(self):\r\n pass", "def log_free_disk_space():\n cmd = 'df -h'\n p = Popen(cmd, shell=True, stdout=PIPE)\n res = p.communicate()\n if res[0]:\n res = res[0]\n else:\n res = res[1]\n logger.warning('Disk usage statisticks:')\n logger.warning(res)", "def test02StoreRefresh(self):\n s = utils.FastStore(max_size=5)\n keys = []\n for i in range(0, 5):\n keys.append(s.Put(i, i))\n\n # This should not raise because keys[0] should be refreshed each time its\n # gotten\n for i in range(0, 1000):\n s.Get(keys[0])\n s.Put(i, i)", "def log_time(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n start = time.time()\n result = func(*args, **kwargs)\n end = time.time()\n f = open(\"log_dev.txt\",'a',encoding=\"utf8\")\n time_res = end - start\n f.write(\"\\n\"+func.__name__+ \" time = \" + str(time_res))\n return result\n\n return wrapper", "def checkLogs():\n run(\"cat /etc/chariot/logs\")#not sure if this is cat-able", "def store(**kwargs):\r\n stored = AppLog(**kwargs)\r\n DBSession.add(stored)", "def test_compute_persistence(perfectModelEnsemble_initialized_control):\n perfectModelEnsemble_initialized_control._compute_persistence(metric=\"acc\")", "def collect_logs(self, log_dir, label=None, min_t=100, max_t=200):\n pass", "def test_backup_with_update_on_disk_of_snapshot_markers(self):\n version = RestConnection(self.backupset.backup_host).get_nodes_version()\n if version[:5] == \"6.5.0\":\n self.log.info(\"\\n\\n******* Due to issue in MB-36904, \\\n \\nthis test will be skipped in 6.5.0 ********\\n\")\n return\n gen1 = BlobGenerator(\"ent-backup1\", \"ent-backup-\", self.value_size, end=100000)\n gen2 = BlobGenerator(\"ent-backup2\", \"ent-backup-\", self.value_size, end=100000)\n gen3 = BlobGenerator(\"ent-backup3\", \"ent-backup-\", self.value_size, end=100000)\n rest_conn = RestConnection(self.backupset.cluster_host)\n rest_conn.create_bucket(bucket=\"bucket0\", ramQuotaMB=1024)\n self.buckets = rest_conn.get_buckets()\n authentication = \"-u Administrator -p password\"\n\n self._load_all_buckets(self.master, gen1, \"create\", 0)\n self.log.info(\"Stop persistent\")\n cluster_nodes = rest_conn.get_nodes()\n clusters = copy.deepcopy(cluster_nodes)\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n for node in clusters:\n shell.execute_command(\"%scbepctl%s %s:11210 -b %s stop %s\" % \\\n (self.cli_command_location,\n self.cmd_ext,\n node.ip,\n \"bucket0\",\n authentication))\n shell.disconnect()\n self.log.info(\"Load 2nd batch docs\")\n self._load_all_buckets(self.master, gen2, \"create\", 0)\n self.log.info(\"Run full backup with cbbackupwrapper\")\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n backup_dir = 
self.tmp_path + \"backup\" + self.master.ip\n shell.execute_command(\"rm -rf %s\" % backup_dir)\n shell.execute_command(\"mkdir %s\" % backup_dir)\n shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m full %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n self.log.info(\"Load 3rd batch docs\")\n self._load_all_buckets(self.master, gen3, \"create\", 0)\n self.log.info(\"Run diff backup with cbbackupwrapper\")\n output, _ = shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m diff %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n\n if output and \"SUCCESSFULLY COMPLETED\" not in output[1]:\n self.fail(\"Failed to backup as the fix in MB-25727\")\n shell.disconnect()", "def test_errors_on_log(self):\n mb = self.maria_backup\n self.assertFalse(mb.errors_on_log()) # at the moment, xtrabackup execution does not generate disk logs", "def add_log(self, log):\n log = str(datetime.datetime.now()) + \": \"+log+\"\\n\"\n print(log)\n self.logs.append(log)\n if len(self.logs) > 10:\n self.append_to_logfile()", "def log_scalar(name, value, step, autolog):\n if not autolog:\n mlflow.log_metric(name, value)", "def recordLogsToList(log):\n print log\n# global LOGLIST\n LOGLIST.append(log)", "def on_L1(self):\r\n self.log()", "def on_L2(self):\r\n self.log()", "def log_all(self):\n self.save_raw()\n self.log()", "def logs_directory(self):", "def test_log_links(self):\n self.host = synthetic_host(\"myserver-with-nids\", [Nid.Nid(\"192.168.0.1\", \"tcp\", 0)])\n self.create_simple_filesystem(self.host)\n fake_log_message(\"192.168.0.1@tcp testfs-MDT0000\")\n response = self.api_client.get(\"/api/log/\")\n (event,) = self.deserialize(response)[\"objects\"]\n self.assertEqual(len(event[\"substitutions\"]), 2)\n self.host.state = \"removed\"\n self.host.save()\n self.mdt.not_deleted = False\n self.mdt.save()\n response = self.api_client.get(\"/api/log/\")\n (event,) = self.deserialize(response)[\"objects\"]\n self.assertEqual(len(event[\"substitutions\"]), 0)", "def log(string):\n\n print string\n\n# data and time\n dt = datetime.now().strftime(\"%b %d %H:%M:%S\")\n\n# check if log file exist / if not create it\n check_logf = os.path.isfile(logfile)\n if check_logf == False:\n os.system(\"touch %s\" % (logfile))\n firstlog = \"%s %s jadm: jadm log file was created!\" % (dt, os.uname()[1])\n os.system(\"echo '%s' > %s\" % (firstlog, logfile))\n\n# applay string to log file\n string = \"%s %s jadm: %s\" % (dt, os.uname()[1], string)\n os.system(\"echo '%s' >> %s\" % (string, logfile))", "def test_data_store_local_index(self, tcex: TcEx):\n tcex.api.tc.v2.datastore('local', self.data_type)", "def createLogicalVolume(self, vg, filesystem, name, mountpoint, size):\n lv = {}\n lv['command'] = 'create:logvol'\n lv['vg'] = vg\n lv['fs'] = filesystem\n lv['size'] = size - EXTENT_SIZE + 1\n lv['name'] = name\n lv['mountPoint'] = mountpoint\n lv['format'] = 'yes'\n\n return lv", "def disk_log_scheme():\r\n if LogOptions._DISK_LOG_SCHEME is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_SCHEME", "def test_cbbackupmgr_collect_logs(self):\n if \"5.5\" > self.cb_version[:3]:\n self.fail(\"This test is only for cb version 5.5 and later. 
\")\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster()\n self._collect_logs()", "def log_runtime(label, mean_time, std, instances):\n pass", "def do_disk_event(client, args):\n args.type = 'disk'\n do_event_show(client, args)", "def show_bigdl_info_logs(bigdl_type=\"float\"):\n callBigDlFunc(bigdl_type, \"showBigDlInfoLogs\")", "def logs(self, **options):\n cmd = self.get_scalingo_prefix_cmd()\n cmd += \" logs -f\"\n for name, val in options.items():\n if len(name) == 1:\n cmd += f\" -{name}\"\n else:\n cmd += f\" --{name}\"\n if val:\n cmd += f\" {val}\"\n if self.env_name == \"local\":\n raise ValueError(\"No logs on local environment\")\n else:\n asyncio.run(self.async_run(cmd))", "def store_data(batch_df: DataFrame, output_delta_lake_path):\n batch_df.select(col(\"MeterId\"),\n col(\"SupplierId\"),\n col(\"Measurement\"),\n col(\"ObservationTime\")) \\\n .repartition(\"SupplierId\") \\\n .write \\\n .partitionBy(\"SupplierId\") \\\n .format(\"delta\") \\\n .mode(\"append\") \\\n .save(output_delta_lake_path)", "def logentry(jobid, label, typ, content=None, path=None):\n ud = str(uuid.uuid4())\n db.logs.save({\"uuid\":ud, \"jobid\":jobid, \"label\":label, \"type\":typ, \"content\":content, \"date\":tstamp(), \"containerurl\":path})", "def dump_to_log(self):\n # self._send_request(\"/dumpToLog\")\n pass", "def load(lb_id, backend='memory'):\n return b_api.fetch(backend).logbook_get(lb_id)", "def store(status, message, **kwargs):\r\n LogRecord(status, message, **kwargs)", "def log_mounts_info(mounts):\n if isinstance(mounts, str):\n mounts = [mounts]\n\n g.log.info(\"Start logging mounts information:\")\n for mount_obj in mounts:\n g.log.info(\"Information of mount %s:%s\", mount_obj.client_system,\n mount_obj.mountpoint)\n # Mount Info\n g.log.info(\"Look For Mountpoint:\\n\")\n cmd = \"mount | grep %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Disk Space Usage\n g.log.info(\"Disk Space Usage Of Mountpoint:\\n\")\n cmd = \"df -h %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Long list the mountpoint\n g.log.info(\"List Mountpoint Entries:\\n\")\n cmd = \"ls -ld %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Stat mountpoint\n g.log.info(\"Mountpoint Status:\\n\")\n cmd = \"stat %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)", "def putlog(self,s):\n if not self.logqueue == None:\n# print s\n self.logqueue.put(\"Spectrum: (\"+time.ctime()+\"):\\n\"+s)", "def set_size_based_rotating_log(log_path=LOGGER_PATH, log_name=LOGGER_FILENAME):\n\n # Checks if the path exists otherwise tries to create it\n if not os.path.exists(log_path):\n try:\n os.makedirs(log_path)\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n print(exc_type, exc_value, exc_traceback)\n\n # Sets the format and level of logging records\n format = '%(asctime)s - %(levelname)s - [%(pathname)s:%(lineno)s - %(funcName)10s() ] - %(message)s'\n formatter = Formatter(format)\n level=logging.DEBUG\n\n # Does basic configuration for the logging system\n logging.basicConfig(format=format, level=level)\n\n # Instantiates a logger object\n logger = logging.getLogger(\"\")\n\n handler = logging.handlers.RotatingFileHandler(filename=log_path+'/'+log_name,\n maxBytes=LOGGER_MAX_SIZE,\n backupCount=LOGGER_MAX_FILES)\n 
handler.setFormatter(formatter)\n handler.rotator = rotator\n handler.namer = namer\n logger.addHandler(handler)\n return logger", "def test_logging_log_rotate_for_mysql(\n self,\n admin_node,\n k8s_actions,\n show_step,\n os_deployed):\n logger.info('Log rotate for mysql')\n log_path = '/var/log/ccp/mysql/'\n log_file = 'mysql.log'\n\n show_step(1)\n # get cron pod\n cron_pod = [pod for pod in k8s_actions.api.pods.list(\n namespace=ext.Namespace.BASE_NAMESPACE)\n if 'cron-' in pod.name][0]\n # clean files\n utils.rm_files(admin_node, cron_pod, log_path + log_file + '*')\n\n show_step(2)\n for day in range(0, 8):\n utils.create_file(admin_node, cron_pod, log_path + log_file, 110)\n utils.run_daily_cron(admin_node, cron_pod, 'logrotate')\n sleep(5)\n\n show_step(3)\n log_files = utils.list_files(\n admin_node, cron_pod, log_path, log_file + '*')\n assert len(log_files) == 7,\\\n \"Count of log files after rotation is wrong. \" \\\n \"Expected {} Actual {}\".format(log_files, 7)", "def log(cmdDict, kind, error=None):\n\n cmdDict['timestamp'] = str(int(time.time() * 1000))\n cmdDict['logType'] = kind\n cmdDict['_id'] = cmdDict['transactionNumber'] + cmdDict['user'] + cmdDict['command'] + cmdDict['timestamp'] + cmdDict['logType']\n\n if (error != None):\n cmdDict['errorMessage'] = error\n\n try:\n Database.insert(TRANSACT_COLLECT, cmdDict)\n\n except pymongo.errors.PyMongoError as err:\n print(f\"ERROR! Could not log command into database. Failed with: {err}\")", "def upload_logs():\n return {\n 'page': 'upload_logs',\n 'raw_logs': '',\n }", "def logMemoryStats():\n class MemoryStatusEx(ctypes.Structure):\n \"\"\" MEMORYSTATUSEX \"\"\"\n kaFields = [\n ( 'dwLength', ctypes.c_ulong ),\n ( 'dwMemoryLoad', ctypes.c_ulong ),\n ( 'ullTotalPhys', ctypes.c_ulonglong ),\n ( 'ullAvailPhys', ctypes.c_ulonglong ),\n ( 'ullTotalPageFile', ctypes.c_ulonglong ),\n ( 'ullAvailPageFile', ctypes.c_ulonglong ),\n ( 'ullTotalVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailExtendedVirtual', ctypes.c_ulonglong ),\n ];\n _fields_ = kaFields; # pylint: disable=invalid-name\n\n def __init__(self):\n super(MemoryStatusEx, self).__init__();\n self.dwLength = ctypes.sizeof(self);\n\n try:\n oStats = MemoryStatusEx();\n ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(oStats));\n except:\n reporter.logXcpt();\n return False;\n\n reporter.log('Memory statistics:');\n for sField, _ in MemoryStatusEx.kaFields:\n reporter.log(' %32s: %s' % (sField, getattr(oStats, sField)));\n return True;", "def get_dismount_mount_record(r, start_time):\n retv = {'state': 'M',\n 'node': r['node'],\n 'volume': r['volume'],\n 'type': r['type'],\n 'logname': r['logname'],\n 'start': start_time,\n 'finish': timeutil.wind_time(start_time, seconds=20, backward=False),\n 'storage_group': 'PROBE_UTILITY',\n 'reads': 0,\n 'writes':0\n }\n return retv", "def azure_fetch_and_push_to_influx(token, azure_data_store, file_path, tag_map, influx_settings):\n df = read_df_from_azure(azure_data_store, file_path, token)\n (host, port, user, password, db_name, batch_size) = influx_settings\n #df=df.resample('1Min').mean()\n write_to_influx(df, tag_map, host, port, user, password, db_name, batch_size)", "def test_data_store_local_new_index(tcex: TcEx):\n data = {'one': 1}\n key = str(uuid.uuid4())\n rid = 'one'\n\n ds = tcex.api.tc.v2.datastore('local', key)\n\n results = ds.add(rid=rid, data=data)\n if results is None:\n assert False, 'datastore add returned None'\n assert results.get('_id') == rid\n assert 
results.get('_shards', {}).get('successful') == 1", "def test_metering_database(self):\n self.with_deletion = self.input.param(\"delete\", False)\n self.db_name = \"%s-testmetering\" % self.db_name\n # validate initial throughput is 5000/3 = 1666\n for bucket in self.cluster.buckets:\n print(bucket.servers)\n self.assertEqual(self.bucket_util.get_throttle_limit(bucket),\n self.bucket_throttling_limit)\n\n # validate create, update, delete stat\n for op_type in [\"create\", \"update\"]:\n if op_type == \"create\":\n self.load_data(create_start=0, create_end=self.num_items, create_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if op_type == \"update\":\n self.load_data(update_start=0, update_end=self.num_items, update_perc=100, mutated=1)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if self.with_deletion:\n self.log.info(\"performing delete operation\")\n self.load_data(delete_start=0, delete_end=self.num_items, delete_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)", "def on_L3(self):\r\n self.log()", "def send_logs(self):\n for i in range(30):\n with open('{}-{}.log'.format(self._log_file_path, i), 'a') as log_file:\n for _ in range(self._log_rate):\n log_file.write(self._log_record + '\\n')", "def fetch_data_logger(fn):\n @functools.wraps(fn)\n def wrapper():\n\n data = fn()\n\n create_or_update_log(data, LOG_TYPES['D'])\n\n return data\n return wrapper", "def list_log_files(fs, devices, start_times, verbose=True, passwords={}):\n import canedge_browser\n\n log_files = []\n\n if len(start_times):\n for idx, device in enumerate(devices):\n start = start_times[idx]\n log_files_device = canedge_browser.get_log_files(fs, [device], start_date=start, passwords=passwords)\n log_files.extend(log_files_device)\n\n if verbose:\n print(f\"Found {len(log_files)} log files\\n\")\n\n return log_files", "def main():\n global tar_file_descr\n\n help_msg = 'Usage: log_collector.py <all | host1[,host2,host3...]>'\n hosts = []\n if len(sys.argv) == 2:\n if '-h' == sys.argv[1] or '--help' == sys.argv[1]:\n print(help_msg)\n sys.exit(0)\n elif 'all' == sys.argv[1]:\n # get logs from all hosts\n hosts = []\n host_objs = CLIENT.host_get_all()\n for host_obj in host_objs:\n hosts.append(host_obj.name)\n else:\n # get logs from specified hosts\n hostnames = sys.argv[1].split(',')\n for host in hostnames:\n if host not in hosts:\n hosts.append(host)\n else:\n print(help_msg)\n sys.exit(1)\n\n # open tar file for storing logs\n fd, tar_path = tempfile.mkstemp(prefix='kolla_support_logs_',\n suffix='.tgz')\n os.close(fd) # avoid fd leak\n\n with tarfile.open(tar_path, 'w:gz') as tar_file_descr:\n # clear out old logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n os.mkdir(LOGDIR)\n\n # gather logs from selected hosts\n try:\n for host in hosts:\n get_logs_from_host(host)\n\n # tar up all the container logs\n tar_file_descr.add(LOGDIR, arcname='container_logs')\n finally:\n # remove uncompressed logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n\n # gather dump output from kolla-cli\n dump_kolla_info()\n\n print('Log collection complete. 
Logs are at %s' % tar_path)", "def log_store(self) -> Optional[str]:\n return pulumi.get(self, \"log_store\")", "def __enable_log_streaming():\n # Enable BigQuery service for the project:\n run_command('gcloud services enable bigquery --project {}'.format(PROJECT_ID))\n\n # Create the BigQuery dataset to store the logs:\n # For details refer to https://cloud.google.com/bigquery/docs/datasets#bigquery-create-dataset-cli\n run_command('bq mk --data_location {} --description \\\"Cloud logging export.\\\" {}'\n .format(LOGS_LOCATION, LOGS_SINK_DATASET_ID), 'already exists')\n\n # Set up a log sink to the above-created BigQuery dataset:\n output_message = run_command('gcloud logging sinks create {} {} --project {} \\\n --log-filter=\\'resource.type=\\\"gcs_bucket\\\" OR resource.type=\\\"project\\\"\\''\n .format(LOGS_SINK_NAME, LOGS_SINK_DESTINATION, PROJECT_ID))\n\n # The service account that will be writing to BQ dataset is listed by the previous command.\n # After extracting the service account from the message, you need to give it BQ Writer role to that service account.\n __set_dataset_access( __find_service_account_in_message(output_message) )", "def get_storage_info(self, address: str, level: StorageLevel):", "def local(self, text):\n\t\tlogf = open(\"update_log.txt\", \"a\")\n\t\tdate = datetime.datetime.now()\n\t\tlogf.writelines(\"[\" + str(date) + \"] \" + text + \"\\n\")\n\t\tlogf.close()", "def log_it(logdata=None):\n with open(\"bloomhack.log\", \"a\") as fp:\n fp.write(logdata)\n return", "def node_storage(self, node):\n def listdir(path, only_dirs=False):\n ents = node.account.sftp_client.listdir(path)\n if not only_dirs:\n return ents\n paths = map(lambda fn: (fn, os.path.join(path, fn)), ents)\n return [p[0] for p in paths if node.account.isdir(p[1])]\n\n store = NodeStorage(RedpandaService.DATA_DIR)\n for ns in listdir(store.data_dir, True):\n if ns == '.coprocessor_offset_checkpoints':\n continue\n ns = store.add_namespace(ns, os.path.join(store.data_dir, ns))\n for topic in listdir(ns.path):\n topic = ns.add_topic(topic, os.path.join(ns.path, topic))\n for num in listdir(topic.path):\n partition = topic.add_partition(\n num, node, os.path.join(topic.path, num))\n partition.add_files(listdir(partition.path))\n return store", "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))", "def snapshot(snapshot_type, result_q, time_delta):", "def _log_process(ip_or_url,\n username,\n password,\n database,\n series_name,\n port=8086,\n polling_interval=1,\n retention_duration=MEASUREMENTS_RETENTION_DURATION,\n **tags):\n logger = _logger()\n logger.info('Logging GPU to Database {}'.format(ip_or_url))\n\n kwargs = {'series_name': series_name,\n 'polling_interval': polling_interval,\n 'retention_duration': retention_duration}\n kwargs.update(tags)\n p = Process(target=_start_logger_process,\n args=(ip_or_url,\n port,\n username,\n password,\n database),\n kwargs=kwargs)\n p.start()\n yield p\n p.terminate()\n p.join()", "def test_start_new_log(self):\n old_log_file_name = self.device.log_file_name\n 
self.device.start_new_log(log_name_prefix=self.get_log_suffix())\n self.assertNotEqual(\n old_log_file_name, self.device.log_file_name,\n f\"Expected log file name to change from {old_log_file_name}\")\n self.assertTrue(\n os.path.exists(old_log_file_name),\n f\"Expected old log file name {old_log_file_name} to exist\")\n self.assertTrue(\n os.path.exists(self.device.log_file_name),\n f\"Expected new log file name {self.device.log_file_name} to exist\")" ]
[ "0.6248957", "0.6004453", "0.53603053", "0.53381205", "0.5311462", "0.5291663", "0.5282635", "0.5279799", "0.52787983", "0.5260151", "0.52150124", "0.5205798", "0.5137791", "0.5104536", "0.5094829", "0.5094829", "0.5089645", "0.5080881", "0.5069531", "0.5067983", "0.5049315", "0.5047344", "0.50428", "0.50336874", "0.5028792", "0.5028792", "0.50232273", "0.5017229", "0.50001866", "0.4975056", "0.49695352", "0.49651346", "0.4959732", "0.49485096", "0.49201605", "0.491775", "0.4906063", "0.4901353", "0.48973086", "0.48958024", "0.48914644", "0.48892727", "0.48823112", "0.48804468", "0.4877903", "0.48676512", "0.48655146", "0.48638356", "0.48596066", "0.48569125", "0.48556426", "0.48419136", "0.48412684", "0.48380032", "0.48372665", "0.48348084", "0.48249245", "0.48086706", "0.4807052", "0.4804408", "0.48027894", "0.47964686", "0.47765866", "0.4768357", "0.475868", "0.47576597", "0.474328", "0.4735613", "0.47350103", "0.47318107", "0.47237858", "0.47210956", "0.47131303", "0.47130087", "0.47083464", "0.47016108", "0.47003374", "0.4697505", "0.46934873", "0.46901193", "0.4685906", "0.46849328", "0.46755195", "0.46703953", "0.46671495", "0.4667117", "0.46545303", "0.46534565", "0.46448874", "0.46371803", "0.46359396", "0.4634748", "0.4631148", "0.46283913", "0.4627094", "0.46101946", "0.45972055", "0.45956996", "0.45937267", "0.45925382", "0.45902616" ]
0.0
-1
This operation is applicable only to general-purpose local-disk and dedicated local-disk instances. You can call this operation up to 30 times per minute. To call this operation at a higher frequency, use a Logstore. For more information, see [Manage a Logstore](~~48990~~).
async def describe_error_log_records_async( self, request: dds_20151201_models.DescribeErrorLogRecordsRequest, ) -> dds_20151201_models.DescribeErrorLogRecordsResponse: runtime = util_models.RuntimeOptions() return await self.describe_error_log_records_with_options_async(request, runtime)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def monitorStore():\n # commented to use psutil system info system_info = systeminfo.get_all_info()\n\n system_info = nodeinfo.node_all()\n system_info ['monitored_timestamp'] = config.get_current_system_timestamp()\n\n # Attach sliver info to system info\n system_info.update(sliverinfo.collectAllDataAPI())\n\n s = shelve.open('log_shelf.db', writeback = True)\n\n while(1):\n try:\n try:\n if s.has_key('current_seq_number'):\n #Update current sequence number\n s['current_seq_number']+= 1\n current_seq = s['current_seq_number']\n else:\n current_seq = 1\n s['current_seq_number']= current_seq\n\n print(\"writing to file: \" + str(current_seq))\n\n # print(\"writing to file\" + str(system_info))\n s[str(current_seq)]= system_info\n\n\n finally:\n s.close()\n break\n\n except OSError:\n # In some research devices, the underlying dbm has a bug which needs to be handled explicitly\n print(\"Exception caught while handling shelve file!! OS Error: file not found. Trying again in 1 second\")\n time.sleep(1)\n continue\n\n delete_seen_entries()", "def log(msg):\n\n print('datastore: %s' % msg)", "def disable_disk_logging():\r\n app.set_option(_DISK_LOG_LEVEL_OPTION, LogOptions._LOG_LEVEL_NONE_KEY, force=True)", "def store_stream(data: DataStream):\n if data:\n try:\n c1 = datetime.now()\n CC.save_datastream(data,\"datastream\")\n e1 = datetime.now()\n CC.save_datastream_to_influxdb(data)\n i1 = datetime.now()\n print(\"Cassandra Time: \", e1-c1, \" Influx Time: \",i1-e1, \" Batch size: \",len(data.data))\n except:\n cc_log()", "def test_Drive_log_count(opencl_env: cldrive_env.OpenCLEnvironment):\n logs = opencl_kernel_driver.Drive(\n \"\"\"\nkernel void A(global int* a, global int* b) {\n a[get_global_id(0)] += b[get_global_id(0)];\n}\n\"\"\",\n 16,\n 16,\n opencl_env,\n 5,\n )\n assert len(logs) == 5", "def disk():\n run(env.disk_usage_command % env)", "def record_used(kind, hash):\n if os.path.exists(LOG_FILEPATH):\n log = open(os.path.join(ROOT, 'used'), 'a')\n else:\n log = open(os.path.join(ROOT, 'used'), 'w+')\n\n log.writelines([\"%s...%s\\n\" % (kind, hash)])", "def check_oplog(con, warning, critical, perf_data):\n warning = warning or 24 \n critical = critical or 4\n try:\n db = con.local\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) \n if (db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) != None) :\n oplog = \"oplog.rs\";\n else :\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"})\n if (db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"}) != None) :\n oplog = \"oplog.$main\";\n else :\n message = \"neither master/slave nor replica set replication detected\";\n return check_levels(None,warning,critical,message)\n\n try:\n set_read_preference(con.admin)\n data=con.local.command(pymongo.son_manipulator.SON([('collstats',oplog)]))\n except:\n data = con.admin.command(son.SON([('collstats',oplog)]))\n\n ol_size=data['size']\n ol_storage_size=data['storageSize']\n ol_used_storage=int(float(ol_size)/ol_storage_size*100+1)\n ol=con.local[oplog]\n firstc = ol.find().sort(\"$natural\",pymongo.ASCENDING).limit(1)[0]['ts']\n lastc = ol.find().sort(\"$natural\",pymongo.DESCENDING).limit(1)[0]['ts']\n time_in_oplog= (lastc.as_datetime()-firstc.as_datetime())\n message=\"Oplog saves \"+ str(time_in_oplog) + \" %d%% used\" %ol_used_storage \n try: #work starting from python2.7\n hours_in_oplog= time_in_oplog.total_seconds()/60/60\n except:\n hours_in_oplog= float(time_in_oplog.seconds + time_in_oplog.days * 24 * 3600)/60/60\n 
approx_level=hours_in_oplog*100/ol_used_storage\n message+=performance_data(perf_data,[(\"%.2f\" % hours_in_oplog,'oplog_time',warning,critical),(\"%.2f \" % approx_level, 'oplog_time_100_percent_used')])\n return check_levels(-approx_level,-warning,-critical,message)\n\n except Exception, e:\n return exit_with_general_critical(e)", "def syslog_local_file(handle, admin_state, name, severity=\"emergencies\",\n size=\"40000\"):\n\n from ucsmsdk.mometa.comm.CommSyslogFile import CommSyslogFile\n\n mo = CommSyslogFile(parent_mo_or_dn=\"sys/svc-ext/syslog\", size=size,\n admin_state=admin_state,\n name=name,\n severity=severity)\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n return mo", "def disk_log_level():\r\n if LogOptions._DISK_LOG_LEVEL is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_LEVEL", "def get_full_log(self):\n return self._get_log('full')", "def _record_storage_event(metric, value=0):\n command_name = properties.VALUES.metrics.command_name.Get()\n metrics.CustomKeyValue(command_name, 'Storage-' + metric, value)", "def get_logdb_quota():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<show><system><logdb-quota></logdb-quota></system></show>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def test_memory_pressure(self):\n self.execute_query(self.query)\n # This triggers a full GC as of openjdk 1.8.\n call([\"jmap\", \"-histo:live\", str(self.cluster.catalogd.get_pid())])\n # Sleep for logbufsecs=5 seconds to wait for the log to be flushed. Wait 5 more\n # seconds to reduce flakiness.\n time.sleep(10)\n assert self.metadata_cache_string not in self._get_catalog_object()", "def getLogs():", "def getLogs():", "def InsertLog():", "def GetLogs(self):\n raise NotImplementedError()", "def logmem(cls, tag):\n\n cls.logger.info(\"----%s----\", str(tag))\n mem = psutil.virtual_memory()\n cls.logger.info(\"total:%s M\", mem.total / 1024 / 1024)\n cls.logger.info(\"available:%s M\", mem.available / 1024 / 1024)\n cls.logger.info(\"used:%s M\", mem.used / 1024 / 1024)\n cls.logger.info(\"free:%s M\", mem.free / 1024 / 1024)\n cls.logger.info(\"active:%s M\", mem.active / 1024 / 1024)\n cls.logger.info(\"inactive:%s M\", mem.inactive / 1024 / 1024)\n cls.logger.info(\"buffers:%s M\", mem.buffers / 1024 / 1024)\n cls.logger.info(\"cached:%s M\", mem.cached / 1024 / 1024)\n cls.logger.info(\"shared:%s M\", mem.shared / 1024 / 1024)", "def upload_crashes(self, name, directory):\n logging.info('Not uploading crashes because no Filestore.')", "def logs(lines=50):\n run(f'journalctl --lines {lines} --unit addok --follow')", "def log(self, message):\n try:\n stat = os.stat(self.logpath)\n if stat.st_size >= 1000000:\n os.rename(self.logpath, self.logpath + '.1')\n except:\n pass\n logfile = open(self.logpath, 'a+')\n logfile.write(message + \"\\n\")\n logfile.close()", "def logs(name, namespace, region, start_time, end_time, offset, failed, tail):\n\n start, end = _align_time(start_time, end_time, offset, tail)\n client = ScfLogClient(name, namespace, region, failed)\n client.fetch_log(start, end, tail)", "def objectLogger(params):\n mode = params['mode']\n maxlen = params['maxlen']\n file_format = params['format']\n home_dir = params['storage_dir']\n server = Listener((params['ip'], int(params['port'])))\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n while True:\n conn = server.accept()\n while True:\n try:\n data = conn.recv()\n except 
EOFError:\n break\n if data:\n storage.append(data)\n else:\n storage.sync(id_generator())\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n conn.close()", "def _load_disk(self):", "def _load_disk(self):", "def redire_spark_logs(bigdl_type=\"float\", log_path=os.getcwd() + \"/bigdl.log\"):\n callBigDlFunc(bigdl_type, \"redirectSparkLogs\", log_path)", "def get_device_log(self, client):\r\n file_path = client.getDeviceLog()\r\n logging.info(str(time.asctime(time.localtime())) + \" Device Logs collected !! \" + file_path)\r\n #self.logger.info(\"Device logs collected at %s\" % file_path)\r\n return file_path", "def logs(self, container: Container) -> str:", "def log_store(self) -> str:\n return pulumi.get(self, \"log_store\")", "def on_sync(self):\r\n self.log()", "def collect_storage_metrics(sys):\n try:\n session = get_session()\n client = InfluxDBClient(host=INFLUXDB_HOSTNAME, port=INFLUXDB_PORT, database=INFLUXDB_DATABASE)\n\n sys_id = sys[\"id\"]\n sys_name = get_system_name(sys)\n\n json_body = list()\n\n # Get Drive statistics\n drive_stats_list = session.get((\"{}/{}/analysed-drive-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n drive_locations = get_drive_location(sys_id, session)\n if CMD.showDriveNames:\n for stats in drive_stats_list:\n location_send = drive_locations.get(stats[\"diskId\"])\n LOG.info((\"Tray{:02.0f}, Slot{:03.0f}\").format(location_send[0], location_send[1]))\n # Add Drive statistics to json body\n for stats in drive_stats_list:\n disk_location_info = drive_locations.get(stats[\"diskId\"])\n disk_item = dict(\n measurement = \"disks\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n sys_tray = (\"{:02.0f}\").format(disk_location_info[0]),\n sys_tray_slot = (\"{:03.0f}\").format(disk_location_info[1])\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in DRIVE_PARAMS\n )\n )\n if CMD.showDriveMetrics:\n LOG.info(\"Drive payload: %s\", disk_item)\n json_body.append(disk_item)\n\n # Get interface statistics\n interface_stats_list = session.get((\"{}/{}/analysed-interface-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showInterfaceNames:\n for stats in interface_stats_list:\n LOG.info(stats[\"interfaceId\"])\n # Add interface statistics to json body\n for stats in interface_stats_list:\n if_item = dict(\n measurement = \"interface\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n interface_id = stats[\"interfaceId\"],\n channel_type = stats[\"channelType\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in INTERFACE_PARAMS\n )\n )\n if CMD.showInterfaceMetrics:\n LOG.info(\"Interface payload: %s\", if_item)\n json_body.append(if_item)\n\n # Get System statistics\n system_stats_list = session.get((\"{}/{}/analysed-system-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n # Add System statistics to json body\n sys_item = dict(\n measurement = \"systems\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name\n ),\n fields = dict(\n (metric, system_stats_list.get(metric)) for metric in SYSTEM_PARAMS\n )\n )\n if CMD.showSystemMetrics:\n LOG.info(\"System payload: %s\", sys_item)\n json_body.append(sys_item)\n \n # Get Volume statistics\n volume_stats_list = session.get((\"{}/{}/analysed-volume-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showVolumeNames:\n for stats in volume_stats_list:\n LOG.info(stats[\"volumeName\"])\n # Add Volume statistics to json body\n for stats in volume_stats_list:\n 
vol_item = dict(\n measurement = \"volumes\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n vol_name = stats[\"volumeName\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in VOLUME_PARAMS\n )\n )\n if CMD.showVolumeMetrics:\n LOG.info(\"Volume payload: %s\", vol_item)\n json_body.append(vol_item)\n\n if not CMD.doNotPost:\n client.write_points(json_body, database=INFLUXDB_DATABASE, time_precision=\"s\")\n\n except RuntimeError:\n LOG.error((\"Error when attempting to post statistics for {}/{}\").format(sys[\"name\"], sys[\"id\"]))", "def reset_used():\n with open(LOG_FILEPATH, 'w+') as logfile:\n pass", "def docker_logs():\n local('docker logs {} -f'.format(project_name))", "def test_slf_notastring():\n oldlogfile = get_logfile()\n start_logfile(1.0)\n start_logfile(True)\n set_logfile(oldlogfile)", "def set_disk_log_level(log_level):\r\n LogOptions._DISK_LOG_SCHEME, LogOptions._DISK_LOG_LEVEL = (\r\n LogOptions._parse_loglevel(log_level, scheme='google'))", "def test_slf_basic():\n oldlogfile = get_logfile()\n with tempfile.TemporaryDirectory() as tmp:\n logfile = os.path.join(tmp, 'log.txt')\n assert ~os.path.exists(logfile)\n start_logfile(logfile)\n assert os.path.exists(logfile)\n set_logfile(oldlogfile)", "def _performance_log(func):\n\n def wrapper(*arg):\n \"\"\" wrapper \"\"\"\n\n start = datetime.datetime.now()\n\n # Code execution\n res = func(*arg)\n\n if _log_performance:\n usage = resource.getrusage(resource.RUSAGE_SELF)\n memory_process = (usage[2])/1000\n\n delta = datetime.datetime.now() - start\n delta_milliseconds = int(delta.total_seconds() * 1000)\n\n _logger.info(\"PERFORMANCE - {0} - milliseconds |{1:>8,}| - memory MB |{2:>8,}|\"\n .format(func.__name__,\n delta_milliseconds,\n memory_process))\n\n return res\n\n return wrapper", "def log(length, file):\n\n if user_init.check_pre_init() and user_utility.check_drive_init() == 'True':\n \n data = user_utility.read_log(length, file)\n\n for log in data:\n print(log)\n\n\n else:\n user_utility.print_error(\"Sink folder not Found! 
Initialise folder first or reset your configuration!\")", "def logprllwrite(self):\n sql = '''select to_char(time_waited, 'FM99999999999999990') retvalue \n from v$system_event se, v$event_name en where se.event(+) \n = en.name and en.name = 'log files parallel write' '''\n self.cur.execute(sql)\n res = self.cur.fetchall()\n for i in res:\n print(i[0])", "def init_log():\n log_op = LogOp()\n log_op.config_log(logging.DEBUG)", "def logManager(self):\n time.sleep(0.1)\n while True:\n try:\n time.sleep(0.2)\n data = self.logQ.get(block=False)\n except Queue.Empty:\n pass\n else:\n try:\n self.log_lock.acquire() \n self.log_file.logEntry(data)\n time.sleep(0.1)\n self.log_lock.release()\n except:\n print '*Unable to write to log file*'", "def startLogFiles():\n #global techLog\n global userl\n _generic_startLogFiles(True)", "def _load_disk(self):\r\n pass", "def log_free_disk_space():\n cmd = 'df -h'\n p = Popen(cmd, shell=True, stdout=PIPE)\n res = p.communicate()\n if res[0]:\n res = res[0]\n else:\n res = res[1]\n logger.warning('Disk usage statisticks:')\n logger.warning(res)", "def test02StoreRefresh(self):\n s = utils.FastStore(max_size=5)\n keys = []\n for i in range(0, 5):\n keys.append(s.Put(i, i))\n\n # This should not raise because keys[0] should be refreshed each time its\n # gotten\n for i in range(0, 1000):\n s.Get(keys[0])\n s.Put(i, i)", "def log_time(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n start = time.time()\n result = func(*args, **kwargs)\n end = time.time()\n f = open(\"log_dev.txt\",'a',encoding=\"utf8\")\n time_res = end - start\n f.write(\"\\n\"+func.__name__+ \" time = \" + str(time_res))\n return result\n\n return wrapper", "def checkLogs():\n run(\"cat /etc/chariot/logs\")#not sure if this is cat-able", "def store(**kwargs):\r\n stored = AppLog(**kwargs)\r\n DBSession.add(stored)", "def test_compute_persistence(perfectModelEnsemble_initialized_control):\n perfectModelEnsemble_initialized_control._compute_persistence(metric=\"acc\")", "def collect_logs(self, log_dir, label=None, min_t=100, max_t=200):\n pass", "def test_backup_with_update_on_disk_of_snapshot_markers(self):\n version = RestConnection(self.backupset.backup_host).get_nodes_version()\n if version[:5] == \"6.5.0\":\n self.log.info(\"\\n\\n******* Due to issue in MB-36904, \\\n \\nthis test will be skipped in 6.5.0 ********\\n\")\n return\n gen1 = BlobGenerator(\"ent-backup1\", \"ent-backup-\", self.value_size, end=100000)\n gen2 = BlobGenerator(\"ent-backup2\", \"ent-backup-\", self.value_size, end=100000)\n gen3 = BlobGenerator(\"ent-backup3\", \"ent-backup-\", self.value_size, end=100000)\n rest_conn = RestConnection(self.backupset.cluster_host)\n rest_conn.create_bucket(bucket=\"bucket0\", ramQuotaMB=1024)\n self.buckets = rest_conn.get_buckets()\n authentication = \"-u Administrator -p password\"\n\n self._load_all_buckets(self.master, gen1, \"create\", 0)\n self.log.info(\"Stop persistent\")\n cluster_nodes = rest_conn.get_nodes()\n clusters = copy.deepcopy(cluster_nodes)\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n for node in clusters:\n shell.execute_command(\"%scbepctl%s %s:11210 -b %s stop %s\" % \\\n (self.cli_command_location,\n self.cmd_ext,\n node.ip,\n \"bucket0\",\n authentication))\n shell.disconnect()\n self.log.info(\"Load 2nd batch docs\")\n self._load_all_buckets(self.master, gen2, \"create\", 0)\n self.log.info(\"Run full backup with cbbackupwrapper\")\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n backup_dir = 
self.tmp_path + \"backup\" + self.master.ip\n shell.execute_command(\"rm -rf %s\" % backup_dir)\n shell.execute_command(\"mkdir %s\" % backup_dir)\n shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m full %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n self.log.info(\"Load 3rd batch docs\")\n self._load_all_buckets(self.master, gen3, \"create\", 0)\n self.log.info(\"Run diff backup with cbbackupwrapper\")\n output, _ = shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m diff %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n\n if output and \"SUCCESSFULLY COMPLETED\" not in output[1]:\n self.fail(\"Failed to backup as the fix in MB-25727\")\n shell.disconnect()", "def test_errors_on_log(self):\n mb = self.maria_backup\n self.assertFalse(mb.errors_on_log()) # at the moment, xtrabackup execution does not generate disk logs", "def add_log(self, log):\n log = str(datetime.datetime.now()) + \": \"+log+\"\\n\"\n print(log)\n self.logs.append(log)\n if len(self.logs) > 10:\n self.append_to_logfile()", "def log_scalar(name, value, step, autolog):\n if not autolog:\n mlflow.log_metric(name, value)", "def recordLogsToList(log):\n print log\n# global LOGLIST\n LOGLIST.append(log)", "def on_L1(self):\r\n self.log()", "def on_L2(self):\r\n self.log()", "def log_all(self):\n self.save_raw()\n self.log()", "def logs_directory(self):", "def test_log_links(self):\n self.host = synthetic_host(\"myserver-with-nids\", [Nid.Nid(\"192.168.0.1\", \"tcp\", 0)])\n self.create_simple_filesystem(self.host)\n fake_log_message(\"192.168.0.1@tcp testfs-MDT0000\")\n response = self.api_client.get(\"/api/log/\")\n (event,) = self.deserialize(response)[\"objects\"]\n self.assertEqual(len(event[\"substitutions\"]), 2)\n self.host.state = \"removed\"\n self.host.save()\n self.mdt.not_deleted = False\n self.mdt.save()\n response = self.api_client.get(\"/api/log/\")\n (event,) = self.deserialize(response)[\"objects\"]\n self.assertEqual(len(event[\"substitutions\"]), 0)", "def log(string):\n\n print string\n\n# data and time\n dt = datetime.now().strftime(\"%b %d %H:%M:%S\")\n\n# check if log file exist / if not create it\n check_logf = os.path.isfile(logfile)\n if check_logf == False:\n os.system(\"touch %s\" % (logfile))\n firstlog = \"%s %s jadm: jadm log file was created!\" % (dt, os.uname()[1])\n os.system(\"echo '%s' > %s\" % (firstlog, logfile))\n\n# applay string to log file\n string = \"%s %s jadm: %s\" % (dt, os.uname()[1], string)\n os.system(\"echo '%s' >> %s\" % (string, logfile))", "def test_data_store_local_index(self, tcex: TcEx):\n tcex.api.tc.v2.datastore('local', self.data_type)", "def createLogicalVolume(self, vg, filesystem, name, mountpoint, size):\n lv = {}\n lv['command'] = 'create:logvol'\n lv['vg'] = vg\n lv['fs'] = filesystem\n lv['size'] = size - EXTENT_SIZE + 1\n lv['name'] = name\n lv['mountPoint'] = mountpoint\n lv['format'] = 'yes'\n\n return lv", "def disk_log_scheme():\r\n if LogOptions._DISK_LOG_SCHEME is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_SCHEME", "def test_cbbackupmgr_collect_logs(self):\n if \"5.5\" > self.cb_version[:3]:\n self.fail(\"This test is only for cb version 5.5 and later. 
\")\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster()\n self._collect_logs()", "def log_runtime(label, mean_time, std, instances):\n pass", "def do_disk_event(client, args):\n args.type = 'disk'\n do_event_show(client, args)", "def show_bigdl_info_logs(bigdl_type=\"float\"):\n callBigDlFunc(bigdl_type, \"showBigDlInfoLogs\")", "def logs(self, **options):\n cmd = self.get_scalingo_prefix_cmd()\n cmd += \" logs -f\"\n for name, val in options.items():\n if len(name) == 1:\n cmd += f\" -{name}\"\n else:\n cmd += f\" --{name}\"\n if val:\n cmd += f\" {val}\"\n if self.env_name == \"local\":\n raise ValueError(\"No logs on local environment\")\n else:\n asyncio.run(self.async_run(cmd))", "def store_data(batch_df: DataFrame, output_delta_lake_path):\n batch_df.select(col(\"MeterId\"),\n col(\"SupplierId\"),\n col(\"Measurement\"),\n col(\"ObservationTime\")) \\\n .repartition(\"SupplierId\") \\\n .write \\\n .partitionBy(\"SupplierId\") \\\n .format(\"delta\") \\\n .mode(\"append\") \\\n .save(output_delta_lake_path)", "def logentry(jobid, label, typ, content=None, path=None):\n ud = str(uuid.uuid4())\n db.logs.save({\"uuid\":ud, \"jobid\":jobid, \"label\":label, \"type\":typ, \"content\":content, \"date\":tstamp(), \"containerurl\":path})", "def load(lb_id, backend='memory'):\n return b_api.fetch(backend).logbook_get(lb_id)", "def dump_to_log(self):\n # self._send_request(\"/dumpToLog\")\n pass", "def store(status, message, **kwargs):\r\n LogRecord(status, message, **kwargs)", "def log_mounts_info(mounts):\n if isinstance(mounts, str):\n mounts = [mounts]\n\n g.log.info(\"Start logging mounts information:\")\n for mount_obj in mounts:\n g.log.info(\"Information of mount %s:%s\", mount_obj.client_system,\n mount_obj.mountpoint)\n # Mount Info\n g.log.info(\"Look For Mountpoint:\\n\")\n cmd = \"mount | grep %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Disk Space Usage\n g.log.info(\"Disk Space Usage Of Mountpoint:\\n\")\n cmd = \"df -h %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Long list the mountpoint\n g.log.info(\"List Mountpoint Entries:\\n\")\n cmd = \"ls -ld %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Stat mountpoint\n g.log.info(\"Mountpoint Status:\\n\")\n cmd = \"stat %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)", "def putlog(self,s):\n if not self.logqueue == None:\n# print s\n self.logqueue.put(\"Spectrum: (\"+time.ctime()+\"):\\n\"+s)", "def set_size_based_rotating_log(log_path=LOGGER_PATH, log_name=LOGGER_FILENAME):\n\n # Checks if the path exists otherwise tries to create it\n if not os.path.exists(log_path):\n try:\n os.makedirs(log_path)\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n print(exc_type, exc_value, exc_traceback)\n\n # Sets the format and level of logging records\n format = '%(asctime)s - %(levelname)s - [%(pathname)s:%(lineno)s - %(funcName)10s() ] - %(message)s'\n formatter = Formatter(format)\n level=logging.DEBUG\n\n # Does basic configuration for the logging system\n logging.basicConfig(format=format, level=level)\n\n # Instantiates a logger object\n logger = logging.getLogger(\"\")\n\n handler = logging.handlers.RotatingFileHandler(filename=log_path+'/'+log_name,\n maxBytes=LOGGER_MAX_SIZE,\n backupCount=LOGGER_MAX_FILES)\n 
handler.setFormatter(formatter)\n handler.rotator = rotator\n handler.namer = namer\n logger.addHandler(handler)\n return logger", "def test_logging_log_rotate_for_mysql(\n self,\n admin_node,\n k8s_actions,\n show_step,\n os_deployed):\n logger.info('Log rotate for mysql')\n log_path = '/var/log/ccp/mysql/'\n log_file = 'mysql.log'\n\n show_step(1)\n # get cron pod\n cron_pod = [pod for pod in k8s_actions.api.pods.list(\n namespace=ext.Namespace.BASE_NAMESPACE)\n if 'cron-' in pod.name][0]\n # clean files\n utils.rm_files(admin_node, cron_pod, log_path + log_file + '*')\n\n show_step(2)\n for day in range(0, 8):\n utils.create_file(admin_node, cron_pod, log_path + log_file, 110)\n utils.run_daily_cron(admin_node, cron_pod, 'logrotate')\n sleep(5)\n\n show_step(3)\n log_files = utils.list_files(\n admin_node, cron_pod, log_path, log_file + '*')\n assert len(log_files) == 7,\\\n \"Count of log files after rotation is wrong. \" \\\n \"Expected {} Actual {}\".format(log_files, 7)", "def log(cmdDict, kind, error=None):\n\n cmdDict['timestamp'] = str(int(time.time() * 1000))\n cmdDict['logType'] = kind\n cmdDict['_id'] = cmdDict['transactionNumber'] + cmdDict['user'] + cmdDict['command'] + cmdDict['timestamp'] + cmdDict['logType']\n\n if (error != None):\n cmdDict['errorMessage'] = error\n\n try:\n Database.insert(TRANSACT_COLLECT, cmdDict)\n\n except pymongo.errors.PyMongoError as err:\n print(f\"ERROR! Could not log command into database. Failed with: {err}\")", "def upload_logs():\n return {\n 'page': 'upload_logs',\n 'raw_logs': '',\n }", "def logMemoryStats():\n class MemoryStatusEx(ctypes.Structure):\n \"\"\" MEMORYSTATUSEX \"\"\"\n kaFields = [\n ( 'dwLength', ctypes.c_ulong ),\n ( 'dwMemoryLoad', ctypes.c_ulong ),\n ( 'ullTotalPhys', ctypes.c_ulonglong ),\n ( 'ullAvailPhys', ctypes.c_ulonglong ),\n ( 'ullTotalPageFile', ctypes.c_ulonglong ),\n ( 'ullAvailPageFile', ctypes.c_ulonglong ),\n ( 'ullTotalVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailExtendedVirtual', ctypes.c_ulonglong ),\n ];\n _fields_ = kaFields; # pylint: disable=invalid-name\n\n def __init__(self):\n super(MemoryStatusEx, self).__init__();\n self.dwLength = ctypes.sizeof(self);\n\n try:\n oStats = MemoryStatusEx();\n ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(oStats));\n except:\n reporter.logXcpt();\n return False;\n\n reporter.log('Memory statistics:');\n for sField, _ in MemoryStatusEx.kaFields:\n reporter.log(' %32s: %s' % (sField, getattr(oStats, sField)));\n return True;", "def get_dismount_mount_record(r, start_time):\n retv = {'state': 'M',\n 'node': r['node'],\n 'volume': r['volume'],\n 'type': r['type'],\n 'logname': r['logname'],\n 'start': start_time,\n 'finish': timeutil.wind_time(start_time, seconds=20, backward=False),\n 'storage_group': 'PROBE_UTILITY',\n 'reads': 0,\n 'writes':0\n }\n return retv", "def azure_fetch_and_push_to_influx(token, azure_data_store, file_path, tag_map, influx_settings):\n df = read_df_from_azure(azure_data_store, file_path, token)\n (host, port, user, password, db_name, batch_size) = influx_settings\n #df=df.resample('1Min').mean()\n write_to_influx(df, tag_map, host, port, user, password, db_name, batch_size)", "def test_metering_database(self):\n self.with_deletion = self.input.param(\"delete\", False)\n self.db_name = \"%s-testmetering\" % self.db_name\n # validate initial throughput is 5000/3 = 1666\n for bucket in self.cluster.buckets:\n print(bucket.servers)\n 
self.assertEqual(self.bucket_util.get_throttle_limit(bucket),\n self.bucket_throttling_limit)\n\n # validate create, update, delete stat\n for op_type in [\"create\", \"update\"]:\n if op_type == \"create\":\n self.load_data(create_start=0, create_end=self.num_items, create_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if op_type == \"update\":\n self.load_data(update_start=0, update_end=self.num_items, update_perc=100, mutated=1)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if self.with_deletion:\n self.log.info(\"performing delete operation\")\n self.load_data(delete_start=0, delete_end=self.num_items, delete_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)", "def test_data_store_local_new_index(tcex: TcEx):\n data = {'one': 1}\n key = str(uuid.uuid4())\n rid = 'one'\n\n ds = tcex.api.tc.v2.datastore('local', key)\n\n results = ds.add(rid=rid, data=data)\n if results is None:\n assert False, 'datastore add returned None'\n assert results.get('_id') == rid\n assert results.get('_shards', {}).get('successful') == 1", "def send_logs(self):\n for i in range(30):\n with open('{}-{}.log'.format(self._log_file_path, i), 'a') as log_file:\n for _ in range(self._log_rate):\n log_file.write(self._log_record + '\\n')", "def on_L3(self):\r\n self.log()", "def fetch_data_logger(fn):\n @functools.wraps(fn)\n def wrapper():\n\n data = fn()\n\n create_or_update_log(data, LOG_TYPES['D'])\n\n return data\n return wrapper", "def list_log_files(fs, devices, start_times, verbose=True, passwords={}):\n import canedge_browser\n\n log_files = []\n\n if len(start_times):\n for idx, device in enumerate(devices):\n start = start_times[idx]\n log_files_device = canedge_browser.get_log_files(fs, [device], start_date=start, passwords=passwords)\n log_files.extend(log_files_device)\n\n if verbose:\n print(f\"Found {len(log_files)} log files\\n\")\n\n return log_files", "def main():\n global tar_file_descr\n\n help_msg = 'Usage: log_collector.py <all | host1[,host2,host3...]>'\n hosts = []\n if len(sys.argv) == 2:\n if '-h' == sys.argv[1] or '--help' == sys.argv[1]:\n print(help_msg)\n sys.exit(0)\n elif 'all' == sys.argv[1]:\n # get logs from all hosts\n hosts = []\n host_objs = CLIENT.host_get_all()\n for host_obj in host_objs:\n hosts.append(host_obj.name)\n else:\n # get logs from specified hosts\n hostnames = sys.argv[1].split(',')\n for host in hostnames:\n if host not in hosts:\n hosts.append(host)\n else:\n print(help_msg)\n sys.exit(1)\n\n # open tar file for storing logs\n fd, tar_path = tempfile.mkstemp(prefix='kolla_support_logs_',\n suffix='.tgz')\n os.close(fd) # avoid fd leak\n\n with tarfile.open(tar_path, 'w:gz') as tar_file_descr:\n # clear out old logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n os.mkdir(LOGDIR)\n\n # gather logs from selected hosts\n try:\n for host in hosts:\n get_logs_from_host(host)\n\n # tar up all the container logs\n tar_file_descr.add(LOGDIR, arcname='container_logs')\n finally:\n # remove uncompressed logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n\n # gather dump output from kolla-cli\n dump_kolla_info()\n\n print('Log collection complete. 
Logs are at %s' % tar_path)", "def log_store(self) -> Optional[str]:\n return pulumi.get(self, \"log_store\")", "def __enable_log_streaming():\n # Enable BigQuery service for the project:\n run_command('gcloud services enable bigquery --project {}'.format(PROJECT_ID))\n\n # Create the BigQuery dataset to store the logs:\n # For details refer to https://cloud.google.com/bigquery/docs/datasets#bigquery-create-dataset-cli\n run_command('bq mk --data_location {} --description \\\"Cloud logging export.\\\" {}'\n .format(LOGS_LOCATION, LOGS_SINK_DATASET_ID), 'already exists')\n\n # Set up a log sink to the above-created BigQuery dataset:\n output_message = run_command('gcloud logging sinks create {} {} --project {} \\\n --log-filter=\\'resource.type=\\\"gcs_bucket\\\" OR resource.type=\\\"project\\\"\\''\n .format(LOGS_SINK_NAME, LOGS_SINK_DESTINATION, PROJECT_ID))\n\n # The service account that will be writing to BQ dataset is listed by the previous command.\n # After extracting the service account from the message, you need to give it BQ Writer role to that service account.\n __set_dataset_access( __find_service_account_in_message(output_message) )", "def get_storage_info(self, address: str, level: StorageLevel):", "def local(self, text):\n\t\tlogf = open(\"update_log.txt\", \"a\")\n\t\tdate = datetime.datetime.now()\n\t\tlogf.writelines(\"[\" + str(date) + \"] \" + text + \"\\n\")\n\t\tlogf.close()", "def log_it(logdata=None):\n with open(\"bloomhack.log\", \"a\") as fp:\n fp.write(logdata)\n return", "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))", "def node_storage(self, node):\n def listdir(path, only_dirs=False):\n ents = node.account.sftp_client.listdir(path)\n if not only_dirs:\n return ents\n paths = map(lambda fn: (fn, os.path.join(path, fn)), ents)\n return [p[0] for p in paths if node.account.isdir(p[1])]\n\n store = NodeStorage(RedpandaService.DATA_DIR)\n for ns in listdir(store.data_dir, True):\n if ns == '.coprocessor_offset_checkpoints':\n continue\n ns = store.add_namespace(ns, os.path.join(store.data_dir, ns))\n for topic in listdir(ns.path):\n topic = ns.add_topic(topic, os.path.join(ns.path, topic))\n for num in listdir(topic.path):\n partition = topic.add_partition(\n num, node, os.path.join(topic.path, num))\n partition.add_files(listdir(partition.path))\n return store", "def snapshot(snapshot_type, result_q, time_delta):", "def _log_process(ip_or_url,\n username,\n password,\n database,\n series_name,\n port=8086,\n polling_interval=1,\n retention_duration=MEASUREMENTS_RETENTION_DURATION,\n **tags):\n logger = _logger()\n logger.info('Logging GPU to Database {}'.format(ip_or_url))\n\n kwargs = {'series_name': series_name,\n 'polling_interval': polling_interval,\n 'retention_duration': retention_duration}\n kwargs.update(tags)\n p = Process(target=_start_logger_process,\n args=(ip_or_url,\n port,\n username,\n password,\n database),\n kwargs=kwargs)\n p.start()\n yield p\n p.terminate()\n p.join()", "def test_start_new_log(self):\n old_log_file_name = self.device.log_file_name\n 
self.device.start_new_log(log_name_prefix=self.get_log_suffix())\n self.assertNotEqual(\n old_log_file_name, self.device.log_file_name,\n f\"Expected log file name to change from {old_log_file_name}\")\n self.assertTrue(\n os.path.exists(old_log_file_name),\n f\"Expected old log file name {old_log_file_name} to exist\")\n self.assertTrue(\n os.path.exists(self.device.log_file_name),\n f\"Expected new log file name {self.device.log_file_name} to exist\")" ]
[ "0.62486845", "0.60031223", "0.53603905", "0.53379613", "0.5311335", "0.52926815", "0.5282698", "0.52791435", "0.5279122", "0.525997", "0.52146244", "0.52052414", "0.5137806", "0.51062226", "0.5093556", "0.5093556", "0.5088433", "0.5079323", "0.50690126", "0.5067886", "0.5049035", "0.5046898", "0.50420696", "0.50331473", "0.5030297", "0.5030297", "0.5022431", "0.5016063", "0.49990737", "0.4972949", "0.4968265", "0.4965747", "0.49598062", "0.49484637", "0.49194708", "0.49178702", "0.4905472", "0.49004963", "0.48970413", "0.489522", "0.48909563", "0.4887827", "0.48821267", "0.4881891", "0.48785982", "0.48678872", "0.48652878", "0.48639703", "0.48581663", "0.48568252", "0.4854318", "0.48435727", "0.48408124", "0.48365796", "0.4836489", "0.48329967", "0.4823732", "0.4806852", "0.4805655", "0.48033065", "0.480192", "0.4796162", "0.4775947", "0.47686175", "0.4759035", "0.47575393", "0.47434506", "0.47358102", "0.47347638", "0.47308475", "0.47239348", "0.47207734", "0.47131824", "0.47117183", "0.47061667", "0.4702254", "0.4699498", "0.4697398", "0.46924555", "0.46898118", "0.4685107", "0.46848604", "0.46757394", "0.4671213", "0.46672994", "0.4666417", "0.46534035", "0.4653088", "0.46438026", "0.4637459", "0.4635404", "0.46326485", "0.46303207", "0.46281067", "0.4626461", "0.4609743", "0.45968643", "0.45966214", "0.45938614", "0.45920384", "0.45898995" ]
0.0
-1
This operation is applicable to subscription instances.
def describe_instance_auto_renewal_attribute_with_options(
    self,
    request: dds_20151201_models.DescribeInstanceAutoRenewalAttributeRequest,
    runtime: util_models.RuntimeOptions,
) -> dds_20151201_models.DescribeInstanceAutoRenewalAttributeResponse:
    UtilClient.validate_model(request)
    query = {}
    if not UtilClient.is_unset(request.dbinstance_id):
        query['DBInstanceId'] = request.dbinstance_id
    if not UtilClient.is_unset(request.dbinstance_type):
        query['DBInstanceType'] = request.dbinstance_type
    if not UtilClient.is_unset(request.owner_account):
        query['OwnerAccount'] = request.owner_account
    if not UtilClient.is_unset(request.owner_id):
        query['OwnerId'] = request.owner_id
    if not UtilClient.is_unset(request.page_number):
        query['PageNumber'] = request.page_number
    if not UtilClient.is_unset(request.page_size):
        query['PageSize'] = request.page_size
    if not UtilClient.is_unset(request.region_id):
        query['RegionId'] = request.region_id
    if not UtilClient.is_unset(request.resource_owner_account):
        query['ResourceOwnerAccount'] = request.resource_owner_account
    if not UtilClient.is_unset(request.resource_owner_id):
        query['ResourceOwnerId'] = request.resource_owner_id
    if not UtilClient.is_unset(request.security_token):
        query['SecurityToken'] = request.security_token
    req = open_api_models.OpenApiRequest(
        query=OpenApiUtilClient.query(query)
    )
    params = open_api_models.Params(
        action='DescribeInstanceAutoRenewalAttribute',
        version='2015-12-01',
        protocol='HTTPS',
        pathname='/',
        method='POST',
        auth_type='AK',
        style='RPC',
        req_body_type='formData',
        body_type='json'
    )
    return TeaCore.from_map(
        dds_20151201_models.DescribeInstanceAutoRenewalAttributeResponse(),
        self.call_api(params, req, runtime)
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def subscription(self):\r\n return SubscriptionResource(self)", "def test_update_subscription(self):\n pass", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def subscription(self, uuid):\r\n return subs.Subscription(self, uuid)", "def test_get_subscription(self):\n pass", "def test_get_subscriptions(self):\n pass", "def test_create_subscription(self):\n pass", "def get_subscription(self):\n return self.request({\n 'path': '/' + UUID + '/subscription'})", "def subscription(self):\n return self._subscription", "def subscription(self):\n return self._subscription", "def test_process_subscriptions(self):\n pass", "def touch_subscription(self):\n try:\n # check if subscription exists\n sub = self.client.get_subscription(subscription=self.subscription_path)\n\n except NotFound:\n self._create_subscription()\n\n else:\n self.topic_path = sub.topic\n print(f\"Subscription exists: {self.subscription_path}\")\n print(f\"Connected to topic: {self.topic_path}\")", "def post_get_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def handle_create(self):\n subscription = self.client().subscription(\n self.properties[self.QUEUE_NAME],\n subscriber=self.properties[self.SUBSCRIBER],\n ttl=self.properties[self.TTL],\n options=self.properties[self.OPTIONS]\n )\n self.resource_id_set(subscription.id)", "def subscribe(self, subject):\n pass", "def _subscribe(self):\n if self.subscribed:\n return False\n return {}", "def subscriptions(self):\r\n return subs.Subscriptions(self)", "def subscription(self, subscription):\n\n self._subscription = subscription", "def post_update_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def test_issue_subscriptions(self):\n pass", "def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def _subscribe(self):\n self.subscribed = True\n self.subscribe_date = now()\n self.unsubscribed = False", "def subscription(bot, update):\n chat_id = update.message.chat_id\n bot.sendMessage(chat_id=chat_id, text=SUBSCRIPTION_MSG, parse_mode='markdown', \n disable_web_page_preview=True)\n \n mp.track(get_user_info(chat_id)['PID'], 'Checked Subscription')", "def test_load_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def subscription(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"subscription\")", "def subscription(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"subscription\")", "def test_aws_service_api_validate_subscription_post(self):\n pass", "def subscriptions(self):\r\n return v3.Subscriptions(self)", "def subscriptions(self) -> pulumi.Output[Optional[Sequence['outputs.ResourceIdResponse']]]:\n return pulumi.get(self, \"subscriptions\")", "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def on_subscribe( client, userdata, mid, granted_qos ):\n logging.info( \"Topic successfully subcribed with QoS: %s\" %granted_qos )", "def post_create_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def subscribe(self, **subscription_request):\n return self.subscribe_impl(mode='subscribe', **subscription_request)", "def 
subscriptions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourceIdArgs']]]]:\n return pulumi.get(self, \"subscriptions\")", "def no_save_individual_subscription(sender, instance, **kwargs):\n try:\n Subscription.objects.get(pk=instance.pk) # looking if the subscription exist, if the case, we assume here is updating active status or email status\n except:\n if instance.user is not None:\n subs_ids = instance.user.groups.values_list('subscription')\n for sub in subs_ids:\n if None not in sub:\n alarm = Subscription.objects.get(id=sub[0]).alarm\n if alarm == instance.alarm:\n raise ValidationError('The user is subscribed to the same alarm for a group')\n\n subs = Subscription.objects.filter(user=instance.user)\n for sub in subs:\n if sub.alarm == instance.alarm:\n raise ValidationError('The user is subscribed to this alarm')", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_issue_add_subscription(self):\n pass", "def create_subscription(self,\n body):\n\n return super().new_api_call_builder.request(\n RequestBuilder().server('default')\n .path('/v2/subscriptions')\n .http_method(HttpMethodEnum.POST)\n .header_param(Parameter()\n .key('Content-Type')\n .value('application/json'))\n .body_param(Parameter()\n .value(body))\n .header_param(Parameter()\n .key('accept')\n .value('application/json'))\n .body_serializer(APIHelper.json_serialize)\n .auth(Single('global'))\n ).response(\n ResponseHandler()\n .deserializer(APIHelper.json_deserialize)\n .is_api_response(True)\n .convertor(ApiResponse.create)\n ).execute()", "def subscribe(self, inst):\r\n if inst not in self._subscribers:\r\n self._subscribers.append(inst)\r\n vprint(\"{} is subscribed to {}\".format(inst.name, self.name))", "def post(self):\n data = request.json\n return new_subscription(data=data)", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_update_subscription_template(self):\n pass", "def test_delete_subscription(self):\n pass", "def get_subscription(self):\n if not hasattr(self, '_subscription'):\n self._subscription = self.admin.subscriptions.select_related('plan').get_overlapping(\n self.admin_id, DateRange(self.period, self.period_end, bounds='[]'))\n return self._subscription", "def get_subscriptions(self):\n return {}", "def subscribe(self):\n res = self._subscribe()\n if res is not None:\n self._subscribed = True\n return res", "def subscribe(receiver):", "def subscribe(receiver):", "def subscribe(receiver):", "def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def subscribe(self, subscription):\n try:\n if isinstance(subscription, Subscription):\n sub = Subscribe(subscription, self.__pool, self.myAddress)\n self.send(self.__pool, sub)\n except Exception:\n handle_actor_system_fail()", "def subscriptions(self):\r\n return subs.AccountSubscriptions(self)", "def set_subscription(self, value):\n self.pub_socket.setsockopt(zmq.SUBSCRIBE, value)", "def add_subscription(self):\n schema = schemas.load(schemas.Subscription, self.request)\n subscription = self.customer.add_subscription(**schema)\n self.request.db.flush()\n self.request.response.status_int = 201\n return {'abonnement': subscription}", "def subscribe(self):\n if not self._subscribed and self._connected:\n if ATTR_STREAM_ID not in self.data:\n msg = self._create_message(strings.SUB_MSG)\n self.write(msg)\n else:\n msg = 
self._create_message(strings.RESUB_MSG)\n self.write(msg)\n self._subscribed = True", "def to_subscription_instance_10(self):\n delivery_params = None # TODO: Implement this\n poll_instances = None # TODO: Implement this\n\n si = tm10.SubscriptionInstance(subscription_id=str(self.subscription_id),\n delivery_parameters=delivery_params,\n poll_instances=poll_instances)\n return si", "def get_subscriptions(self) -> Iterator[\"Subscription\"]:\n yield from self._subscriptions[self.id]", "def subscription(self) -> SubscriptionServiceProxy:\n if self._subscription_services is None:\n self._subscription_services = SubscriptionServiceProxy(self)\n return self._subscription_services", "def post_list_subscriptions(\n self, response: pubsub.ListSubscriptionsResponse\n ) -> pubsub.ListSubscriptionsResponse:\n return response", "def subscribe(self, request: Request) -> Response:\n ids = request.data.get(\"ids\", None)\n session_id = request.data.get(\"session_id\")\n content_type = ContentType.objects.get_for_model(self.get_queryset().model)\n user = request.user if request.user.is_authenticated else get_anonymous_user()\n subscription = Subscription.objects.create(user=user, session_id=session_id)\n\n if ids is None:\n # Subscribe to the whole table.\n subscription.subscribe(\n content_type, [Observer.ALL_IDS], (ChangeType.CREATE, ChangeType.DELETE)\n )\n else:\n # Verify all ids exists and user has permissions to view them.\n for id in ids:\n if not self.user_has_permission(id, request.user):\n raise NotFound(f\"Item {id} does not exist\")\n\n change_types = (ChangeType.UPDATE, ChangeType.DELETE, ChangeType.CREATE)\n subscription.subscribe(content_type, ids, change_types)\n\n resp = {\"subscription_id\": subscription.subscription_id}\n return Response(resp)", "def _pause_subscription(self):\n return {}", "def add_subscription(self, query):\n key = query.key()\n if key not in self.subscriptions:\n self.subscriptions += [key]\n self.put()", "def test_subscribe(self):\n self.service.clientConnected()\n self.service.subscribe(u'url', None)\n pubsubClient = self.service.pubsubClient\n self.assertIn(u'url', pubsubClient.subscriptions)", "def test_list_template_subscriptions(self):\n pass", "def test_update_template_subscription(self):\n pass", "def get_subscription_id(self):\n return self.instance_metadata.subscription_id", "def _async_track_subscription(self, subscription: Subscription) -> None:\n if _is_simple_match(subscription.topic):\n self._simple_subscriptions.setdefault(subscription.topic, []).append(\n subscription\n )\n else:\n self._wildcard_subscriptions.append(subscription)", "def getAllSubscriptions(self):\n return self.request(\n \"getAllSubscriptions\",\n )", "def subscriptions(self):\n if not hasattr(self, '_subscriptions'):\n subscriptions_resource = self.resource.subscriptions\n self._subscriptions = Subscriptions(\n subscriptions_resource, self.client)\n return self._subscriptions", "def test_get_template_subscription(self):\n pass", "def list_subscriptions(self):\n return {'abonnementen': self.customer.abonnementen}", "def GetSubscription(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_subscriptions(self):\n return self.subscriptions.all()", "def _unsubscribe(self):\n if not self.subscribed:\n return False\n return {}", "def toggle_subscription(self):\n user = self.context['request'].user\n # pylint: disable=no-member\n profile = 
UserProfile.objects.get(\n user=user)\n club = self.context['club']\n\n if club in profile.subscriptions.all():\n club.subscribed_users.remove(profile)\n else:\n club.subscribed_users.add(profile)", "def __call__(\n self,\n request: pubsub.Subscription,\n *,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Optional[float] = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> pubsub.Subscription:\n\n http_options: List[Dict[str, str]] = [\n {\n \"method\": \"put\",\n \"uri\": \"/v1/{name=projects/*/subscriptions/*}\",\n \"body\": \"*\",\n },\n ]\n request, metadata = self._interceptor.pre_create_subscription(\n request, metadata\n )\n pb_request = pubsub.Subscription.pb(request)\n transcoded_request = path_template.transcode(http_options, pb_request)\n\n # Jsonify the request body\n\n body = json_format.MessageToJson(\n transcoded_request[\"body\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n uri = transcoded_request[\"uri\"]\n method = transcoded_request[\"method\"]\n\n # Jsonify the query params\n query_params = json.loads(\n json_format.MessageToJson(\n transcoded_request[\"query_params\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n )\n query_params.update(self._get_unset_required_fields(query_params))\n\n query_params[\"$alt\"] = \"json;enum-encoding=int\"\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = getattr(self._session, method)(\n \"{host}{uri}\".format(host=self._host, uri=uri),\n timeout=timeout,\n headers=headers,\n params=rest_helpers.flatten_query_params(query_params, strict=True),\n data=body,\n )\n\n # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)\n\n # Return the response\n resp = pubsub.Subscription()\n pb_resp = pubsub.Subscription.pb(resp)\n\n json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)\n resp = self._interceptor.post_create_subscription(resp)\n return resp", "def _create_sub(name, rostype, topic_callback, *args, **kwargs):\n # counting subscriber instance per topic name\n if name in TopicBack.sub_instance_count.keys():\n TopicBack.sub_instance_count[name] += 1\n else:\n TopicBack.sub_instance_count[name] = 1\n\n return rospy.Subscriber(name, rostype, topic_callback, *args, **kwargs)", "def delete_individual_subscriptions_for_grupal_subscription(sender, instance, **kwargs):\n if instance.group is not None: # Only for group subscription creation\n users = User.objects.filter(groups__name=instance.group)\n subs = Subscription.objects.filter(user__in=users)\n for sub in subs:\n if sub.alarm == instance.alarm:\n print('%s deleted' % sub)\n sub.delete()", "def test_create_subscription_template(self):\n pass", "def test_issue_delete_subscription(self):\n pass", "def to_subscription_instance_11(self):\n subscription_params = tm11.SubscriptionParameters(response_type=self.response_type,\n content_bindings=[str(x) for x in\n self.supported_content.all()])\n\n if self.query:\n subscription_params.query = self.query.to_query_11()\n\n push_params = None # TODO: Implement this\n poll_instances = None # TODO: Implement this\n si = tm11.SubscriptionInstance(subscription_id=str(self.subscription_id),\n status=self.status,\n subscription_parameters=subscription_params,\n push_parameters=push_params,\n poll_instances=poll_instances)\n return si", "def subscribe(self, 
item_name):\n if item_name == ITEM_NAME:\n self.subscribed = item_name\n else:\n # Only one item for a unique chat room is managed.\n raise SubscribeError(\"No such item\")", "def __init__(self, data):\n super(OptionsChainSubscriptionCreate, self).__init__()\n self.data = data", "def test_get_subscription_template(self):\n pass", "def enable_subscription():\n client = KConsumer(config=subscriber_config)\n counter = 0\n while 1:\n data = client.consume()\n if data:\n print(\"Received Data\", counter)\n class_label = inference_on_data(data.value)\n publish_response(class_label)", "def subscribeConsumer(consumer):", "def __init__(self, ContextId, ReferenceId, data):\n super(OptionsChainSubscriptionModify, self).__init__(\n ReferenceId=ReferenceId,\n ContextId=ContextId)\n self.data = data", "def getSubscriptions(self):\n\n address = self.getAddress()\n if address is None:\n return []\n else:\n return [\n \"shellies/announce\",\n \"{}/online\".format(address),\n \"{}/emeter/{}/energy\".format(address, self.getChannel()),\n \"{}/emeter/{}/returned_energy\".format(address, self.getChannel()),\n \"{}/emeter/{}/power\".format(address, self.getChannel()),\n \"{}/emeter/{}/reactive_power\".format(address, self.getChannel()),\n \"{}/emeter/{}/voltage\".format(address, self.getChannel()),\n \"{}/emeter/{}/total\".format(address, self.getChannel()),\n \"{}/emeter/{}/total_returned\".format(address, self.getChannel())\n ]", "def subscribe_command(shared, chat, message, args):\n subs = shared[\"subs\"]\n subs.append(chat.id)\n shared[\"subs\"] = subs", "def __init__(self, ContextId, ReferenceId):\n super(OptionsChainSubscriptionRemove, self).__init__(\n ReferenceId=ReferenceId,\n ContextId=ContextId)", "def _subscribe(self, sub_type: str, sub_version: str, condition: dict, callback) -> str:\n self.__logger.debug(f'subscribe to {sub_type} version {sub_version} with condition {condition}')\n data = {\n 'type': sub_type,\n 'version': sub_version,\n 'condition': condition,\n 'transport': {\n 'method': 'webhook',\n 'callback': f'{self.callback_url}/callback',\n 'secret': self.secret\n }\n }\n r_data = self.__api_post_request(TWITCH_API_BASE_URL + 'eventsub/subscriptions', data=data)\n result = r_data.json()\n error = result.get('error')\n if r_data.status_code == 500:\n raise TwitchBackendException(error)\n if error is not None:\n if error.lower() == 'conflict':\n raise EventSubSubscriptionConflict(result.get('message', ''))\n raise EventSubSubscriptionError(result.get('message'))\n sub_id = result['data'][0]['id']\n self.__add_callback(sub_id, callback)\n if self.wait_for_subscription_confirm:\n timeout = datetime.datetime.utcnow() + datetime.timedelta(\n seconds=self.wait_for_subscription_confirm_timeout)\n while timeout >= datetime.datetime.utcnow():\n if self.__callbacks[sub_id]['active']:\n return sub_id\n asyncio.get_event_loop().run_until_complete(asyncio.sleep(0.01))\n self.__callbacks.pop(sub_id, None)\n raise EventSubSubscriptionTimeout()\n return sub_id", "def subscribePost() -> object:\n log = logging.getLogger(__name__)\n db = Db()\n\n body = request.get_json()\n\n if body is None:\n return jsonify({\"error\": \"json body is required\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n if not('datasetId') in body:\n return jsonify({\"error\": \"datasetId is a required attribute\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n if not('notificationUrl') in body:\n return jsonify({\"error\": \"notificationUrl is a required attribute\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n\n subscription = db.Subscriptions(\n 
datasetId=body['datasetId'],\n notificationUrl=body['notificationUrl']\n )\n\n subscription.save()\n\n subscription = json.loads(subscription.to_json())\n subscription['id'] = subscription['_id'][\"$oid\"]\n subscription.pop(\"_id\")\n log.debug(\"subscription created\")\n\n return jsonify(subscription), HTTPStatus.CREATED", "def pause_subscription(self):\n res = self._pause_subscription()\n self._subscribed = not self.subscribed\n return res", "def subscription(self, subscription):\n if subscription is None:\n raise ValueError(\"Invalid value for `subscription`, must not be `None`\")\n\n self._subscription = subscription", "def test_list_pending_template_subscriptions(self):\n pass", "def subscribe(receiver, updateInterval=10):", "def getsubscriptions(self):\n subs = {}\n for sub in self._subscriptions.values():\n subs[sub.ID] = sub.asTuple()\n return subs", "def test_existing_subscriptions_autosubscription(self) -> None:\n stream_name = \"new_public_stream\"\n cordelia = self.example_user(\"cordelia\")\n self.common_subscribe_to_streams(cordelia, [stream_name], invite_only=False)\n result = self.client_post(\n \"/json/subscriptions/exists\", {\"stream\": stream_name, \"autosubscribe\": \"false\"}\n )\n response_dict = self.assert_json_success(result)\n self.assertIn(\"subscribed\", response_dict)\n self.assertFalse(response_dict[\"subscribed\"])\n\n result = self.client_post(\n \"/json/subscriptions/exists\", {\"stream\": stream_name, \"autosubscribe\": \"true\"}\n )\n response_dict = self.assert_json_success(result)\n self.assertIn(\"subscribed\", response_dict)\n self.assertTrue(response_dict)", "def get_subscriptions_to_self(self):\n return self._roster.get_my_subscribers()\n return self._roster.get_my_subscribers()" ]
[ "0.6768464", "0.6724986", "0.67119396", "0.6617272", "0.659594", "0.6585656", "0.65232456", "0.6443254", "0.6432979", "0.6342068", "0.6316182", "0.6316182", "0.63120866", "0.62734044", "0.6266068", "0.6236521", "0.6234891", "0.6200018", "0.6196757", "0.61762744", "0.61590755", "0.6145037", "0.61427397", "0.6117785", "0.60913867", "0.6046587", "0.60381734", "0.60381734", "0.60095227", "0.60068405", "0.6002366", "0.5974324", "0.5970992", "0.59404236", "0.5922197", "0.58950824", "0.58852506", "0.58791226", "0.58771014", "0.5865434", "0.5850745", "0.5843618", "0.5842252", "0.5842233", "0.5829431", "0.5820333", "0.58125776", "0.57993525", "0.57912076", "0.57912076", "0.57912076", "0.57897145", "0.57676", "0.57596546", "0.5758095", "0.57508147", "0.5745929", "0.57144934", "0.5696573", "0.5695495", "0.56950444", "0.5684795", "0.56609845", "0.5660792", "0.56408226", "0.56199765", "0.5612484", "0.5610517", "0.5599685", "0.5596116", "0.5595185", "0.55934614", "0.55910414", "0.55863214", "0.55849946", "0.55775213", "0.55711204", "0.5563398", "0.5536979", "0.55124307", "0.5510051", "0.5499565", "0.546586", "0.54585266", "0.54536515", "0.5451924", "0.5440518", "0.5438442", "0.54334587", "0.54208875", "0.5406239", "0.5402143", "0.53994006", "0.5399044", "0.53945386", "0.5393975", "0.53886455", "0.53747004", "0.53721243", "0.53708404", "0.5369461" ]
0.0
-1
This operation is applicable to subscription instances.
async def describe_instance_auto_renewal_attribute_with_options_async(
    self,
    request: dds_20151201_models.DescribeInstanceAutoRenewalAttributeRequest,
    runtime: util_models.RuntimeOptions,
) -> dds_20151201_models.DescribeInstanceAutoRenewalAttributeResponse:
    UtilClient.validate_model(request)
    query = {}
    if not UtilClient.is_unset(request.dbinstance_id):
        query['DBInstanceId'] = request.dbinstance_id
    if not UtilClient.is_unset(request.dbinstance_type):
        query['DBInstanceType'] = request.dbinstance_type
    if not UtilClient.is_unset(request.owner_account):
        query['OwnerAccount'] = request.owner_account
    if not UtilClient.is_unset(request.owner_id):
        query['OwnerId'] = request.owner_id
    if not UtilClient.is_unset(request.page_number):
        query['PageNumber'] = request.page_number
    if not UtilClient.is_unset(request.page_size):
        query['PageSize'] = request.page_size
    if not UtilClient.is_unset(request.region_id):
        query['RegionId'] = request.region_id
    if not UtilClient.is_unset(request.resource_owner_account):
        query['ResourceOwnerAccount'] = request.resource_owner_account
    if not UtilClient.is_unset(request.resource_owner_id):
        query['ResourceOwnerId'] = request.resource_owner_id
    if not UtilClient.is_unset(request.security_token):
        query['SecurityToken'] = request.security_token
    req = open_api_models.OpenApiRequest(
        query=OpenApiUtilClient.query(query)
    )
    params = open_api_models.Params(
        action='DescribeInstanceAutoRenewalAttribute',
        version='2015-12-01',
        protocol='HTTPS',
        pathname='/',
        method='POST',
        auth_type='AK',
        style='RPC',
        req_body_type='formData',
        body_type='json'
    )
    return TeaCore.from_map(
        dds_20151201_models.DescribeInstanceAutoRenewalAttributeResponse(),
        await self.call_api_async(params, req, runtime)
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def subscription(self):\r\n return SubscriptionResource(self)", "def test_update_subscription(self):\n pass", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def subscription(self, uuid):\r\n return subs.Subscription(self, uuid)", "def test_get_subscription(self):\n pass", "def test_get_subscriptions(self):\n pass", "def test_create_subscription(self):\n pass", "def get_subscription(self):\n return self.request({\n 'path': '/' + UUID + '/subscription'})", "def subscription(self):\n return self._subscription", "def subscription(self):\n return self._subscription", "def test_process_subscriptions(self):\n pass", "def touch_subscription(self):\n try:\n # check if subscription exists\n sub = self.client.get_subscription(subscription=self.subscription_path)\n\n except NotFound:\n self._create_subscription()\n\n else:\n self.topic_path = sub.topic\n print(f\"Subscription exists: {self.subscription_path}\")\n print(f\"Connected to topic: {self.topic_path}\")", "def post_get_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def handle_create(self):\n subscription = self.client().subscription(\n self.properties[self.QUEUE_NAME],\n subscriber=self.properties[self.SUBSCRIBER],\n ttl=self.properties[self.TTL],\n options=self.properties[self.OPTIONS]\n )\n self.resource_id_set(subscription.id)", "def subscribe(self, subject):\n pass", "def _subscribe(self):\n if self.subscribed:\n return False\n return {}", "def subscriptions(self):\r\n return subs.Subscriptions(self)", "def subscription(self, subscription):\n\n self._subscription = subscription", "def post_update_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def test_issue_subscriptions(self):\n pass", "def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def _subscribe(self):\n self.subscribed = True\n self.subscribe_date = now()\n self.unsubscribed = False", "def subscription(bot, update):\n chat_id = update.message.chat_id\n bot.sendMessage(chat_id=chat_id, text=SUBSCRIPTION_MSG, parse_mode='markdown', \n disable_web_page_preview=True)\n \n mp.track(get_user_info(chat_id)['PID'], 'Checked Subscription')", "def test_load_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def subscription(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"subscription\")", "def subscription(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"subscription\")", "def test_aws_service_api_validate_subscription_post(self):\n pass", "def subscriptions(self):\r\n return v3.Subscriptions(self)", "def subscriptions(self) -> pulumi.Output[Optional[Sequence['outputs.ResourceIdResponse']]]:\n return pulumi.get(self, \"subscriptions\")", "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def on_subscribe( client, userdata, mid, granted_qos ):\n logging.info( \"Topic successfully subcribed with QoS: %s\" %granted_qos )", "def post_create_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def subscribe(self, **subscription_request):\n return self.subscribe_impl(mode='subscribe', **subscription_request)", "def 
subscriptions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourceIdArgs']]]]:\n return pulumi.get(self, \"subscriptions\")", "def no_save_individual_subscription(sender, instance, **kwargs):\n try:\n Subscription.objects.get(pk=instance.pk) # looking if the subscription exist, if the case, we assume here is updating active status or email status\n except:\n if instance.user is not None:\n subs_ids = instance.user.groups.values_list('subscription')\n for sub in subs_ids:\n if None not in sub:\n alarm = Subscription.objects.get(id=sub[0]).alarm\n if alarm == instance.alarm:\n raise ValidationError('The user is subscribed to the same alarm for a group')\n\n subs = Subscription.objects.filter(user=instance.user)\n for sub in subs:\n if sub.alarm == instance.alarm:\n raise ValidationError('The user is subscribed to this alarm')", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_issue_add_subscription(self):\n pass", "def create_subscription(self,\n body):\n\n return super().new_api_call_builder.request(\n RequestBuilder().server('default')\n .path('/v2/subscriptions')\n .http_method(HttpMethodEnum.POST)\n .header_param(Parameter()\n .key('Content-Type')\n .value('application/json'))\n .body_param(Parameter()\n .value(body))\n .header_param(Parameter()\n .key('accept')\n .value('application/json'))\n .body_serializer(APIHelper.json_serialize)\n .auth(Single('global'))\n ).response(\n ResponseHandler()\n .deserializer(APIHelper.json_deserialize)\n .is_api_response(True)\n .convertor(ApiResponse.create)\n ).execute()", "def subscribe(self, inst):\r\n if inst not in self._subscribers:\r\n self._subscribers.append(inst)\r\n vprint(\"{} is subscribed to {}\".format(inst.name, self.name))", "def post(self):\n data = request.json\n return new_subscription(data=data)", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_update_subscription_template(self):\n pass", "def test_delete_subscription(self):\n pass", "def get_subscription(self):\n if not hasattr(self, '_subscription'):\n self._subscription = self.admin.subscriptions.select_related('plan').get_overlapping(\n self.admin_id, DateRange(self.period, self.period_end, bounds='[]'))\n return self._subscription", "def get_subscriptions(self):\n return {}", "def subscribe(self):\n res = self._subscribe()\n if res is not None:\n self._subscribed = True\n return res", "def subscribe(receiver):", "def subscribe(receiver):", "def subscribe(receiver):", "def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def subscribe(self, subscription):\n try:\n if isinstance(subscription, Subscription):\n sub = Subscribe(subscription, self.__pool, self.myAddress)\n self.send(self.__pool, sub)\n except Exception:\n handle_actor_system_fail()", "def subscriptions(self):\r\n return subs.AccountSubscriptions(self)", "def set_subscription(self, value):\n self.pub_socket.setsockopt(zmq.SUBSCRIBE, value)", "def add_subscription(self):\n schema = schemas.load(schemas.Subscription, self.request)\n subscription = self.customer.add_subscription(**schema)\n self.request.db.flush()\n self.request.response.status_int = 201\n return {'abonnement': subscription}", "def subscribe(self):\n if not self._subscribed and self._connected:\n if ATTR_STREAM_ID not in self.data:\n msg = self._create_message(strings.SUB_MSG)\n self.write(msg)\n else:\n msg = 
self._create_message(strings.RESUB_MSG)\n self.write(msg)\n self._subscribed = True", "def to_subscription_instance_10(self):\n delivery_params = None # TODO: Implement this\n poll_instances = None # TODO: Implement this\n\n si = tm10.SubscriptionInstance(subscription_id=str(self.subscription_id),\n delivery_parameters=delivery_params,\n poll_instances=poll_instances)\n return si", "def get_subscriptions(self) -> Iterator[\"Subscription\"]:\n yield from self._subscriptions[self.id]", "def subscription(self) -> SubscriptionServiceProxy:\n if self._subscription_services is None:\n self._subscription_services = SubscriptionServiceProxy(self)\n return self._subscription_services", "def post_list_subscriptions(\n self, response: pubsub.ListSubscriptionsResponse\n ) -> pubsub.ListSubscriptionsResponse:\n return response", "def subscribe(self, request: Request) -> Response:\n ids = request.data.get(\"ids\", None)\n session_id = request.data.get(\"session_id\")\n content_type = ContentType.objects.get_for_model(self.get_queryset().model)\n user = request.user if request.user.is_authenticated else get_anonymous_user()\n subscription = Subscription.objects.create(user=user, session_id=session_id)\n\n if ids is None:\n # Subscribe to the whole table.\n subscription.subscribe(\n content_type, [Observer.ALL_IDS], (ChangeType.CREATE, ChangeType.DELETE)\n )\n else:\n # Verify all ids exists and user has permissions to view them.\n for id in ids:\n if not self.user_has_permission(id, request.user):\n raise NotFound(f\"Item {id} does not exist\")\n\n change_types = (ChangeType.UPDATE, ChangeType.DELETE, ChangeType.CREATE)\n subscription.subscribe(content_type, ids, change_types)\n\n resp = {\"subscription_id\": subscription.subscription_id}\n return Response(resp)", "def _pause_subscription(self):\n return {}", "def add_subscription(self, query):\n key = query.key()\n if key not in self.subscriptions:\n self.subscriptions += [key]\n self.put()", "def test_subscribe(self):\n self.service.clientConnected()\n self.service.subscribe(u'url', None)\n pubsubClient = self.service.pubsubClient\n self.assertIn(u'url', pubsubClient.subscriptions)", "def test_list_template_subscriptions(self):\n pass", "def test_update_template_subscription(self):\n pass", "def get_subscription_id(self):\n return self.instance_metadata.subscription_id", "def _async_track_subscription(self, subscription: Subscription) -> None:\n if _is_simple_match(subscription.topic):\n self._simple_subscriptions.setdefault(subscription.topic, []).append(\n subscription\n )\n else:\n self._wildcard_subscriptions.append(subscription)", "def getAllSubscriptions(self):\n return self.request(\n \"getAllSubscriptions\",\n )", "def subscriptions(self):\n if not hasattr(self, '_subscriptions'):\n subscriptions_resource = self.resource.subscriptions\n self._subscriptions = Subscriptions(\n subscriptions_resource, self.client)\n return self._subscriptions", "def test_get_template_subscription(self):\n pass", "def list_subscriptions(self):\n return {'abonnementen': self.customer.abonnementen}", "def get_subscriptions(self):\n return self.subscriptions.all()", "def GetSubscription(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def _unsubscribe(self):\n if not self.subscribed:\n return False\n return {}", "def toggle_subscription(self):\n user = self.context['request'].user\n # pylint: disable=no-member\n profile = 
UserProfile.objects.get(\n user=user)\n club = self.context['club']\n\n if club in profile.subscriptions.all():\n club.subscribed_users.remove(profile)\n else:\n club.subscribed_users.add(profile)", "def __call__(\n self,\n request: pubsub.Subscription,\n *,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Optional[float] = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> pubsub.Subscription:\n\n http_options: List[Dict[str, str]] = [\n {\n \"method\": \"put\",\n \"uri\": \"/v1/{name=projects/*/subscriptions/*}\",\n \"body\": \"*\",\n },\n ]\n request, metadata = self._interceptor.pre_create_subscription(\n request, metadata\n )\n pb_request = pubsub.Subscription.pb(request)\n transcoded_request = path_template.transcode(http_options, pb_request)\n\n # Jsonify the request body\n\n body = json_format.MessageToJson(\n transcoded_request[\"body\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n uri = transcoded_request[\"uri\"]\n method = transcoded_request[\"method\"]\n\n # Jsonify the query params\n query_params = json.loads(\n json_format.MessageToJson(\n transcoded_request[\"query_params\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n )\n query_params.update(self._get_unset_required_fields(query_params))\n\n query_params[\"$alt\"] = \"json;enum-encoding=int\"\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = getattr(self._session, method)(\n \"{host}{uri}\".format(host=self._host, uri=uri),\n timeout=timeout,\n headers=headers,\n params=rest_helpers.flatten_query_params(query_params, strict=True),\n data=body,\n )\n\n # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)\n\n # Return the response\n resp = pubsub.Subscription()\n pb_resp = pubsub.Subscription.pb(resp)\n\n json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)\n resp = self._interceptor.post_create_subscription(resp)\n return resp", "def _create_sub(name, rostype, topic_callback, *args, **kwargs):\n # counting subscriber instance per topic name\n if name in TopicBack.sub_instance_count.keys():\n TopicBack.sub_instance_count[name] += 1\n else:\n TopicBack.sub_instance_count[name] = 1\n\n return rospy.Subscriber(name, rostype, topic_callback, *args, **kwargs)", "def delete_individual_subscriptions_for_grupal_subscription(sender, instance, **kwargs):\n if instance.group is not None: # Only for group subscription creation\n users = User.objects.filter(groups__name=instance.group)\n subs = Subscription.objects.filter(user__in=users)\n for sub in subs:\n if sub.alarm == instance.alarm:\n print('%s deleted' % sub)\n sub.delete()", "def test_create_subscription_template(self):\n pass", "def test_issue_delete_subscription(self):\n pass", "def to_subscription_instance_11(self):\n subscription_params = tm11.SubscriptionParameters(response_type=self.response_type,\n content_bindings=[str(x) for x in\n self.supported_content.all()])\n\n if self.query:\n subscription_params.query = self.query.to_query_11()\n\n push_params = None # TODO: Implement this\n poll_instances = None # TODO: Implement this\n si = tm11.SubscriptionInstance(subscription_id=str(self.subscription_id),\n status=self.status,\n subscription_parameters=subscription_params,\n push_parameters=push_params,\n poll_instances=poll_instances)\n return si", "def subscribe(self, 
item_name):\n if item_name == ITEM_NAME:\n self.subscribed = item_name\n else:\n # Only one item for a unique chat room is managed.\n raise SubscribeError(\"No such item\")", "def __init__(self, data):\n super(OptionsChainSubscriptionCreate, self).__init__()\n self.data = data", "def test_get_subscription_template(self):\n pass", "def enable_subscription():\n client = KConsumer(config=subscriber_config)\n counter = 0\n while 1:\n data = client.consume()\n if data:\n print(\"Received Data\", counter)\n class_label = inference_on_data(data.value)\n publish_response(class_label)", "def subscribeConsumer(consumer):", "def __init__(self, ContextId, ReferenceId, data):\n super(OptionsChainSubscriptionModify, self).__init__(\n ReferenceId=ReferenceId,\n ContextId=ContextId)\n self.data = data", "def getSubscriptions(self):\n\n address = self.getAddress()\n if address is None:\n return []\n else:\n return [\n \"shellies/announce\",\n \"{}/online\".format(address),\n \"{}/emeter/{}/energy\".format(address, self.getChannel()),\n \"{}/emeter/{}/returned_energy\".format(address, self.getChannel()),\n \"{}/emeter/{}/power\".format(address, self.getChannel()),\n \"{}/emeter/{}/reactive_power\".format(address, self.getChannel()),\n \"{}/emeter/{}/voltage\".format(address, self.getChannel()),\n \"{}/emeter/{}/total\".format(address, self.getChannel()),\n \"{}/emeter/{}/total_returned\".format(address, self.getChannel())\n ]", "def subscribe_command(shared, chat, message, args):\n subs = shared[\"subs\"]\n subs.append(chat.id)\n shared[\"subs\"] = subs", "def __init__(self, ContextId, ReferenceId):\n super(OptionsChainSubscriptionRemove, self).__init__(\n ReferenceId=ReferenceId,\n ContextId=ContextId)", "def subscribePost() -> object:\n log = logging.getLogger(__name__)\n db = Db()\n\n body = request.get_json()\n\n if body is None:\n return jsonify({\"error\": \"json body is required\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n if not('datasetId') in body:\n return jsonify({\"error\": \"datasetId is a required attribute\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n if not('notificationUrl') in body:\n return jsonify({\"error\": \"notificationUrl is a required attribute\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n\n subscription = db.Subscriptions(\n datasetId=body['datasetId'],\n notificationUrl=body['notificationUrl']\n )\n\n subscription.save()\n\n subscription = json.loads(subscription.to_json())\n subscription['id'] = subscription['_id'][\"$oid\"]\n subscription.pop(\"_id\")\n log.debug(\"subscription created\")\n\n return jsonify(subscription), HTTPStatus.CREATED", "def _subscribe(self, sub_type: str, sub_version: str, condition: dict, callback) -> str:\n self.__logger.debug(f'subscribe to {sub_type} version {sub_version} with condition {condition}')\n data = {\n 'type': sub_type,\n 'version': sub_version,\n 'condition': condition,\n 'transport': {\n 'method': 'webhook',\n 'callback': f'{self.callback_url}/callback',\n 'secret': self.secret\n }\n }\n r_data = self.__api_post_request(TWITCH_API_BASE_URL + 'eventsub/subscriptions', data=data)\n result = r_data.json()\n error = result.get('error')\n if r_data.status_code == 500:\n raise TwitchBackendException(error)\n if error is not None:\n if error.lower() == 'conflict':\n raise EventSubSubscriptionConflict(result.get('message', ''))\n raise EventSubSubscriptionError(result.get('message'))\n sub_id = result['data'][0]['id']\n self.__add_callback(sub_id, callback)\n if self.wait_for_subscription_confirm:\n timeout = datetime.datetime.utcnow() + 
datetime.timedelta(\n seconds=self.wait_for_subscription_confirm_timeout)\n while timeout >= datetime.datetime.utcnow():\n if self.__callbacks[sub_id]['active']:\n return sub_id\n asyncio.get_event_loop().run_until_complete(asyncio.sleep(0.01))\n self.__callbacks.pop(sub_id, None)\n raise EventSubSubscriptionTimeout()\n return sub_id", "def pause_subscription(self):\n res = self._pause_subscription()\n self._subscribed = not self.subscribed\n return res", "def subscription(self, subscription):\n if subscription is None:\n raise ValueError(\"Invalid value for `subscription`, must not be `None`\")\n\n self._subscription = subscription", "def test_list_pending_template_subscriptions(self):\n pass", "def subscribe(receiver, updateInterval=10):", "def getsubscriptions(self):\n subs = {}\n for sub in self._subscriptions.values():\n subs[sub.ID] = sub.asTuple()\n return subs", "def test_existing_subscriptions_autosubscription(self) -> None:\n stream_name = \"new_public_stream\"\n cordelia = self.example_user(\"cordelia\")\n self.common_subscribe_to_streams(cordelia, [stream_name], invite_only=False)\n result = self.client_post(\n \"/json/subscriptions/exists\", {\"stream\": stream_name, \"autosubscribe\": \"false\"}\n )\n response_dict = self.assert_json_success(result)\n self.assertIn(\"subscribed\", response_dict)\n self.assertFalse(response_dict[\"subscribed\"])\n\n result = self.client_post(\n \"/json/subscriptions/exists\", {\"stream\": stream_name, \"autosubscribe\": \"true\"}\n )\n response_dict = self.assert_json_success(result)\n self.assertIn(\"subscribed\", response_dict)\n self.assertTrue(response_dict)", "def get_subscriptions_to_self(self):\n return self._roster.get_my_subscribers()\n return self._roster.get_my_subscribers()" ]
[ "0.6766924", "0.6723815", "0.67114395", "0.66165465", "0.6594774", "0.65839344", "0.6522862", "0.6443223", "0.64325064", "0.63409066", "0.6316549", "0.6316549", "0.63120353", "0.62725765", "0.62641907", "0.62356377", "0.6234742", "0.6200784", "0.6197641", "0.6175624", "0.61567813", "0.6144675", "0.6142669", "0.6117894", "0.60898656", "0.6045312", "0.6037747", "0.6037747", "0.60090065", "0.6006827", "0.6002745", "0.5973457", "0.59708565", "0.5938422", "0.59216875", "0.58956856", "0.5884083", "0.5878568", "0.58764887", "0.5864635", "0.58512646", "0.58422667", "0.5841695", "0.5840756", "0.58291566", "0.5818745", "0.5813365", "0.5799376", "0.5790863", "0.5790863", "0.5790863", "0.5789006", "0.5766831", "0.5760116", "0.5756521", "0.57498324", "0.5745291", "0.5712999", "0.5696784", "0.56962174", "0.56935626", "0.5683641", "0.5660921", "0.56607026", "0.5641244", "0.5619445", "0.5611283", "0.5609982", "0.5598294", "0.55964607", "0.5595865", "0.5592494", "0.55910856", "0.5585468", "0.55849886", "0.55776924", "0.55707777", "0.55619156", "0.55364245", "0.55118704", "0.55088645", "0.5499071", "0.5464195", "0.54575783", "0.5453018", "0.5450689", "0.5440342", "0.5438776", "0.5432882", "0.5420828", "0.5404704", "0.54024047", "0.5397794", "0.5397691", "0.5393936", "0.5392811", "0.5388118", "0.53736436", "0.5372243", "0.5370428", "0.5369889" ]
0.0
-1
This operation is applicable to subscription instances.
def describe_instance_auto_renewal_attribute( self, request: dds_20151201_models.DescribeInstanceAutoRenewalAttributeRequest, ) -> dds_20151201_models.DescribeInstanceAutoRenewalAttributeResponse: runtime = util_models.RuntimeOptions() return self.describe_instance_auto_renewal_attribute_with_options(request, runtime)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def subscription(self):\r\n return SubscriptionResource(self)", "def test_update_subscription(self):\n pass", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def subscription(self, uuid):\r\n return subs.Subscription(self, uuid)", "def test_get_subscription(self):\n pass", "def test_get_subscriptions(self):\n pass", "def test_create_subscription(self):\n pass", "def get_subscription(self):\n return self.request({\n 'path': '/' + UUID + '/subscription'})", "def subscription(self):\n return self._subscription", "def subscription(self):\n return self._subscription", "def test_process_subscriptions(self):\n pass", "def touch_subscription(self):\n try:\n # check if subscription exists\n sub = self.client.get_subscription(subscription=self.subscription_path)\n\n except NotFound:\n self._create_subscription()\n\n else:\n self.topic_path = sub.topic\n print(f\"Subscription exists: {self.subscription_path}\")\n print(f\"Connected to topic: {self.topic_path}\")", "def post_get_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def handle_create(self):\n subscription = self.client().subscription(\n self.properties[self.QUEUE_NAME],\n subscriber=self.properties[self.SUBSCRIBER],\n ttl=self.properties[self.TTL],\n options=self.properties[self.OPTIONS]\n )\n self.resource_id_set(subscription.id)", "def subscribe(self, subject):\n pass", "def _subscribe(self):\n if self.subscribed:\n return False\n return {}", "def subscriptions(self):\r\n return subs.Subscriptions(self)", "def subscription(self, subscription):\n\n self._subscription = subscription", "def post_update_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def test_issue_subscriptions(self):\n pass", "def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def _subscribe(self):\n self.subscribed = True\n self.subscribe_date = now()\n self.unsubscribed = False", "def subscription(bot, update):\n chat_id = update.message.chat_id\n bot.sendMessage(chat_id=chat_id, text=SUBSCRIPTION_MSG, parse_mode='markdown', \n disable_web_page_preview=True)\n \n mp.track(get_user_info(chat_id)['PID'], 'Checked Subscription')", "def test_load_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def subscription(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"subscription\")", "def subscription(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"subscription\")", "def test_aws_service_api_validate_subscription_post(self):\n pass", "def subscriptions(self):\r\n return v3.Subscriptions(self)", "def subscriptions(self) -> pulumi.Output[Optional[Sequence['outputs.ResourceIdResponse']]]:\n return pulumi.get(self, \"subscriptions\")", "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def on_subscribe( client, userdata, mid, granted_qos ):\n logging.info( \"Topic successfully subcribed with QoS: %s\" %granted_qos )", "def post_create_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def subscribe(self, **subscription_request):\n return self.subscribe_impl(mode='subscribe', **subscription_request)", "def 
subscriptions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourceIdArgs']]]]:\n return pulumi.get(self, \"subscriptions\")", "def no_save_individual_subscription(sender, instance, **kwargs):\n try:\n Subscription.objects.get(pk=instance.pk) # looking if the subscription exist, if the case, we assume here is updating active status or email status\n except:\n if instance.user is not None:\n subs_ids = instance.user.groups.values_list('subscription')\n for sub in subs_ids:\n if None not in sub:\n alarm = Subscription.objects.get(id=sub[0]).alarm\n if alarm == instance.alarm:\n raise ValidationError('The user is subscribed to the same alarm for a group')\n\n subs = Subscription.objects.filter(user=instance.user)\n for sub in subs:\n if sub.alarm == instance.alarm:\n raise ValidationError('The user is subscribed to this alarm')", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_issue_add_subscription(self):\n pass", "def create_subscription(self,\n body):\n\n return super().new_api_call_builder.request(\n RequestBuilder().server('default')\n .path('/v2/subscriptions')\n .http_method(HttpMethodEnum.POST)\n .header_param(Parameter()\n .key('Content-Type')\n .value('application/json'))\n .body_param(Parameter()\n .value(body))\n .header_param(Parameter()\n .key('accept')\n .value('application/json'))\n .body_serializer(APIHelper.json_serialize)\n .auth(Single('global'))\n ).response(\n ResponseHandler()\n .deserializer(APIHelper.json_deserialize)\n .is_api_response(True)\n .convertor(ApiResponse.create)\n ).execute()", "def subscribe(self, inst):\r\n if inst not in self._subscribers:\r\n self._subscribers.append(inst)\r\n vprint(\"{} is subscribed to {}\".format(inst.name, self.name))", "def post(self):\n data = request.json\n return new_subscription(data=data)", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_update_subscription_template(self):\n pass", "def test_delete_subscription(self):\n pass", "def get_subscription(self):\n if not hasattr(self, '_subscription'):\n self._subscription = self.admin.subscriptions.select_related('plan').get_overlapping(\n self.admin_id, DateRange(self.period, self.period_end, bounds='[]'))\n return self._subscription", "def get_subscriptions(self):\n return {}", "def subscribe(self):\n res = self._subscribe()\n if res is not None:\n self._subscribed = True\n return res", "def subscribe(receiver):", "def subscribe(receiver):", "def subscribe(receiver):", "def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def subscribe(self, subscription):\n try:\n if isinstance(subscription, Subscription):\n sub = Subscribe(subscription, self.__pool, self.myAddress)\n self.send(self.__pool, sub)\n except Exception:\n handle_actor_system_fail()", "def subscriptions(self):\r\n return subs.AccountSubscriptions(self)", "def set_subscription(self, value):\n self.pub_socket.setsockopt(zmq.SUBSCRIBE, value)", "def add_subscription(self):\n schema = schemas.load(schemas.Subscription, self.request)\n subscription = self.customer.add_subscription(**schema)\n self.request.db.flush()\n self.request.response.status_int = 201\n return {'abonnement': subscription}", "def subscribe(self):\n if not self._subscribed and self._connected:\n if ATTR_STREAM_ID not in self.data:\n msg = self._create_message(strings.SUB_MSG)\n self.write(msg)\n else:\n msg = 
self._create_message(strings.RESUB_MSG)\n self.write(msg)\n self._subscribed = True", "def to_subscription_instance_10(self):\n delivery_params = None # TODO: Implement this\n poll_instances = None # TODO: Implement this\n\n si = tm10.SubscriptionInstance(subscription_id=str(self.subscription_id),\n delivery_parameters=delivery_params,\n poll_instances=poll_instances)\n return si", "def get_subscriptions(self) -> Iterator[\"Subscription\"]:\n yield from self._subscriptions[self.id]", "def subscription(self) -> SubscriptionServiceProxy:\n if self._subscription_services is None:\n self._subscription_services = SubscriptionServiceProxy(self)\n return self._subscription_services", "def post_list_subscriptions(\n self, response: pubsub.ListSubscriptionsResponse\n ) -> pubsub.ListSubscriptionsResponse:\n return response", "def subscribe(self, request: Request) -> Response:\n ids = request.data.get(\"ids\", None)\n session_id = request.data.get(\"session_id\")\n content_type = ContentType.objects.get_for_model(self.get_queryset().model)\n user = request.user if request.user.is_authenticated else get_anonymous_user()\n subscription = Subscription.objects.create(user=user, session_id=session_id)\n\n if ids is None:\n # Subscribe to the whole table.\n subscription.subscribe(\n content_type, [Observer.ALL_IDS], (ChangeType.CREATE, ChangeType.DELETE)\n )\n else:\n # Verify all ids exists and user has permissions to view them.\n for id in ids:\n if not self.user_has_permission(id, request.user):\n raise NotFound(f\"Item {id} does not exist\")\n\n change_types = (ChangeType.UPDATE, ChangeType.DELETE, ChangeType.CREATE)\n subscription.subscribe(content_type, ids, change_types)\n\n resp = {\"subscription_id\": subscription.subscription_id}\n return Response(resp)", "def _pause_subscription(self):\n return {}", "def add_subscription(self, query):\n key = query.key()\n if key not in self.subscriptions:\n self.subscriptions += [key]\n self.put()", "def test_subscribe(self):\n self.service.clientConnected()\n self.service.subscribe(u'url', None)\n pubsubClient = self.service.pubsubClient\n self.assertIn(u'url', pubsubClient.subscriptions)", "def test_list_template_subscriptions(self):\n pass", "def test_update_template_subscription(self):\n pass", "def get_subscription_id(self):\n return self.instance_metadata.subscription_id", "def _async_track_subscription(self, subscription: Subscription) -> None:\n if _is_simple_match(subscription.topic):\n self._simple_subscriptions.setdefault(subscription.topic, []).append(\n subscription\n )\n else:\n self._wildcard_subscriptions.append(subscription)", "def getAllSubscriptions(self):\n return self.request(\n \"getAllSubscriptions\",\n )", "def subscriptions(self):\n if not hasattr(self, '_subscriptions'):\n subscriptions_resource = self.resource.subscriptions\n self._subscriptions = Subscriptions(\n subscriptions_resource, self.client)\n return self._subscriptions", "def test_get_template_subscription(self):\n pass", "def list_subscriptions(self):\n return {'abonnementen': self.customer.abonnementen}", "def GetSubscription(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_subscriptions(self):\n return self.subscriptions.all()", "def _unsubscribe(self):\n if not self.subscribed:\n return False\n return {}", "def toggle_subscription(self):\n user = self.context['request'].user\n # pylint: disable=no-member\n profile = 
UserProfile.objects.get(\n user=user)\n club = self.context['club']\n\n if club in profile.subscriptions.all():\n club.subscribed_users.remove(profile)\n else:\n club.subscribed_users.add(profile)", "def __call__(\n self,\n request: pubsub.Subscription,\n *,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Optional[float] = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> pubsub.Subscription:\n\n http_options: List[Dict[str, str]] = [\n {\n \"method\": \"put\",\n \"uri\": \"/v1/{name=projects/*/subscriptions/*}\",\n \"body\": \"*\",\n },\n ]\n request, metadata = self._interceptor.pre_create_subscription(\n request, metadata\n )\n pb_request = pubsub.Subscription.pb(request)\n transcoded_request = path_template.transcode(http_options, pb_request)\n\n # Jsonify the request body\n\n body = json_format.MessageToJson(\n transcoded_request[\"body\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n uri = transcoded_request[\"uri\"]\n method = transcoded_request[\"method\"]\n\n # Jsonify the query params\n query_params = json.loads(\n json_format.MessageToJson(\n transcoded_request[\"query_params\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n )\n query_params.update(self._get_unset_required_fields(query_params))\n\n query_params[\"$alt\"] = \"json;enum-encoding=int\"\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = getattr(self._session, method)(\n \"{host}{uri}\".format(host=self._host, uri=uri),\n timeout=timeout,\n headers=headers,\n params=rest_helpers.flatten_query_params(query_params, strict=True),\n data=body,\n )\n\n # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)\n\n # Return the response\n resp = pubsub.Subscription()\n pb_resp = pubsub.Subscription.pb(resp)\n\n json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)\n resp = self._interceptor.post_create_subscription(resp)\n return resp", "def _create_sub(name, rostype, topic_callback, *args, **kwargs):\n # counting subscriber instance per topic name\n if name in TopicBack.sub_instance_count.keys():\n TopicBack.sub_instance_count[name] += 1\n else:\n TopicBack.sub_instance_count[name] = 1\n\n return rospy.Subscriber(name, rostype, topic_callback, *args, **kwargs)", "def delete_individual_subscriptions_for_grupal_subscription(sender, instance, **kwargs):\n if instance.group is not None: # Only for group subscription creation\n users = User.objects.filter(groups__name=instance.group)\n subs = Subscription.objects.filter(user__in=users)\n for sub in subs:\n if sub.alarm == instance.alarm:\n print('%s deleted' % sub)\n sub.delete()", "def test_create_subscription_template(self):\n pass", "def test_issue_delete_subscription(self):\n pass", "def to_subscription_instance_11(self):\n subscription_params = tm11.SubscriptionParameters(response_type=self.response_type,\n content_bindings=[str(x) for x in\n self.supported_content.all()])\n\n if self.query:\n subscription_params.query = self.query.to_query_11()\n\n push_params = None # TODO: Implement this\n poll_instances = None # TODO: Implement this\n si = tm11.SubscriptionInstance(subscription_id=str(self.subscription_id),\n status=self.status,\n subscription_parameters=subscription_params,\n push_parameters=push_params,\n poll_instances=poll_instances)\n return si", "def subscribe(self, 
item_name):\n if item_name == ITEM_NAME:\n self.subscribed = item_name\n else:\n # Only one item for a unique chat room is managed.\n raise SubscribeError(\"No such item\")", "def __init__(self, data):\n super(OptionsChainSubscriptionCreate, self).__init__()\n self.data = data", "def test_get_subscription_template(self):\n pass", "def enable_subscription():\n client = KConsumer(config=subscriber_config)\n counter = 0\n while 1:\n data = client.consume()\n if data:\n print(\"Received Data\", counter)\n class_label = inference_on_data(data.value)\n publish_response(class_label)", "def subscribeConsumer(consumer):", "def __init__(self, ContextId, ReferenceId, data):\n super(OptionsChainSubscriptionModify, self).__init__(\n ReferenceId=ReferenceId,\n ContextId=ContextId)\n self.data = data", "def getSubscriptions(self):\n\n address = self.getAddress()\n if address is None:\n return []\n else:\n return [\n \"shellies/announce\",\n \"{}/online\".format(address),\n \"{}/emeter/{}/energy\".format(address, self.getChannel()),\n \"{}/emeter/{}/returned_energy\".format(address, self.getChannel()),\n \"{}/emeter/{}/power\".format(address, self.getChannel()),\n \"{}/emeter/{}/reactive_power\".format(address, self.getChannel()),\n \"{}/emeter/{}/voltage\".format(address, self.getChannel()),\n \"{}/emeter/{}/total\".format(address, self.getChannel()),\n \"{}/emeter/{}/total_returned\".format(address, self.getChannel())\n ]", "def subscribe_command(shared, chat, message, args):\n subs = shared[\"subs\"]\n subs.append(chat.id)\n shared[\"subs\"] = subs", "def __init__(self, ContextId, ReferenceId):\n super(OptionsChainSubscriptionRemove, self).__init__(\n ReferenceId=ReferenceId,\n ContextId=ContextId)", "def _subscribe(self, sub_type: str, sub_version: str, condition: dict, callback) -> str:\n self.__logger.debug(f'subscribe to {sub_type} version {sub_version} with condition {condition}')\n data = {\n 'type': sub_type,\n 'version': sub_version,\n 'condition': condition,\n 'transport': {\n 'method': 'webhook',\n 'callback': f'{self.callback_url}/callback',\n 'secret': self.secret\n }\n }\n r_data = self.__api_post_request(TWITCH_API_BASE_URL + 'eventsub/subscriptions', data=data)\n result = r_data.json()\n error = result.get('error')\n if r_data.status_code == 500:\n raise TwitchBackendException(error)\n if error is not None:\n if error.lower() == 'conflict':\n raise EventSubSubscriptionConflict(result.get('message', ''))\n raise EventSubSubscriptionError(result.get('message'))\n sub_id = result['data'][0]['id']\n self.__add_callback(sub_id, callback)\n if self.wait_for_subscription_confirm:\n timeout = datetime.datetime.utcnow() + datetime.timedelta(\n seconds=self.wait_for_subscription_confirm_timeout)\n while timeout >= datetime.datetime.utcnow():\n if self.__callbacks[sub_id]['active']:\n return sub_id\n asyncio.get_event_loop().run_until_complete(asyncio.sleep(0.01))\n self.__callbacks.pop(sub_id, None)\n raise EventSubSubscriptionTimeout()\n return sub_id", "def subscribePost() -> object:\n log = logging.getLogger(__name__)\n db = Db()\n\n body = request.get_json()\n\n if body is None:\n return jsonify({\"error\": \"json body is required\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n if not('datasetId') in body:\n return jsonify({\"error\": \"datasetId is a required attribute\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n if not('notificationUrl') in body:\n return jsonify({\"error\": \"notificationUrl is a required attribute\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n\n subscription = db.Subscriptions(\n 
datasetId=body['datasetId'],\n notificationUrl=body['notificationUrl']\n )\n\n subscription.save()\n\n subscription = json.loads(subscription.to_json())\n subscription['id'] = subscription['_id'][\"$oid\"]\n subscription.pop(\"_id\")\n log.debug(\"subscription created\")\n\n return jsonify(subscription), HTTPStatus.CREATED", "def pause_subscription(self):\n res = self._pause_subscription()\n self._subscribed = not self.subscribed\n return res", "def subscription(self, subscription):\n if subscription is None:\n raise ValueError(\"Invalid value for `subscription`, must not be `None`\")\n\n self._subscription = subscription", "def test_list_pending_template_subscriptions(self):\n pass", "def subscribe(receiver, updateInterval=10):", "def getsubscriptions(self):\n subs = {}\n for sub in self._subscriptions.values():\n subs[sub.ID] = sub.asTuple()\n return subs", "def test_existing_subscriptions_autosubscription(self) -> None:\n stream_name = \"new_public_stream\"\n cordelia = self.example_user(\"cordelia\")\n self.common_subscribe_to_streams(cordelia, [stream_name], invite_only=False)\n result = self.client_post(\n \"/json/subscriptions/exists\", {\"stream\": stream_name, \"autosubscribe\": \"false\"}\n )\n response_dict = self.assert_json_success(result)\n self.assertIn(\"subscribed\", response_dict)\n self.assertFalse(response_dict[\"subscribed\"])\n\n result = self.client_post(\n \"/json/subscriptions/exists\", {\"stream\": stream_name, \"autosubscribe\": \"true\"}\n )\n response_dict = self.assert_json_success(result)\n self.assertIn(\"subscribed\", response_dict)\n self.assertTrue(response_dict)", "def get_subscriptions_to_self(self):\n return self._roster.get_my_subscribers()\n return self._roster.get_my_subscribers()" ]
[ "0.6768464", "0.6724986", "0.67119396", "0.6617272", "0.659594", "0.6585656", "0.65232456", "0.6443254", "0.6432979", "0.6342068", "0.6316182", "0.6316182", "0.63120866", "0.62734044", "0.6266068", "0.6236521", "0.6234891", "0.6200018", "0.6196757", "0.61762744", "0.61590755", "0.6145037", "0.61427397", "0.6117785", "0.60913867", "0.6046587", "0.60381734", "0.60381734", "0.60095227", "0.60068405", "0.6002366", "0.5974324", "0.5970992", "0.59404236", "0.5922197", "0.58950824", "0.58852506", "0.58791226", "0.58771014", "0.5865434", "0.5850745", "0.5843618", "0.5842252", "0.5842233", "0.5829431", "0.5820333", "0.58125776", "0.57993525", "0.57912076", "0.57912076", "0.57912076", "0.57897145", "0.57676", "0.57596546", "0.5758095", "0.57508147", "0.5745929", "0.57144934", "0.5696573", "0.5695495", "0.56950444", "0.5684795", "0.56609845", "0.5660792", "0.56408226", "0.56199765", "0.5612484", "0.5610517", "0.5599685", "0.5596116", "0.5595185", "0.55934614", "0.55910414", "0.55863214", "0.55849946", "0.55775213", "0.55711204", "0.5563398", "0.5536979", "0.55124307", "0.5510051", "0.5499565", "0.546586", "0.54585266", "0.54536515", "0.5451924", "0.5440518", "0.5438442", "0.54334587", "0.54208875", "0.5406239", "0.5402143", "0.53994006", "0.5399044", "0.53945386", "0.5393975", "0.53886455", "0.53747004", "0.53721243", "0.53708404", "0.5369461" ]
0.0
-1
This operation is applicable to subscription instances.
async def describe_instance_auto_renewal_attribute_async( self, request: dds_20151201_models.DescribeInstanceAutoRenewalAttributeRequest, ) -> dds_20151201_models.DescribeInstanceAutoRenewalAttributeResponse: runtime = util_models.RuntimeOptions() return await self.describe_instance_auto_renewal_attribute_with_options_async(request, runtime)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def subscription(self):\r\n return SubscriptionResource(self)", "def test_update_subscription(self):\n pass", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def subscription(self, uuid):\r\n return subs.Subscription(self, uuid)", "def test_get_subscription(self):\n pass", "def test_get_subscriptions(self):\n pass", "def test_create_subscription(self):\n pass", "def get_subscription(self):\n return self.request({\n 'path': '/' + UUID + '/subscription'})", "def subscription(self):\n return self._subscription", "def subscription(self):\n return self._subscription", "def test_process_subscriptions(self):\n pass", "def touch_subscription(self):\n try:\n # check if subscription exists\n sub = self.client.get_subscription(subscription=self.subscription_path)\n\n except NotFound:\n self._create_subscription()\n\n else:\n self.topic_path = sub.topic\n print(f\"Subscription exists: {self.subscription_path}\")\n print(f\"Connected to topic: {self.topic_path}\")", "def post_get_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def handle_create(self):\n subscription = self.client().subscription(\n self.properties[self.QUEUE_NAME],\n subscriber=self.properties[self.SUBSCRIBER],\n ttl=self.properties[self.TTL],\n options=self.properties[self.OPTIONS]\n )\n self.resource_id_set(subscription.id)", "def subscribe(self, subject):\n pass", "def _subscribe(self):\n if self.subscribed:\n return False\n return {}", "def subscriptions(self):\r\n return subs.Subscriptions(self)", "def subscription(self, subscription):\n\n self._subscription = subscription", "def post_update_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def test_issue_subscriptions(self):\n pass", "def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def _subscribe(self):\n self.subscribed = True\n self.subscribe_date = now()\n self.unsubscribed = False", "def subscription(bot, update):\n chat_id = update.message.chat_id\n bot.sendMessage(chat_id=chat_id, text=SUBSCRIPTION_MSG, parse_mode='markdown', \n disable_web_page_preview=True)\n \n mp.track(get_user_info(chat_id)['PID'], 'Checked Subscription')", "def test_load_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def subscription(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"subscription\")", "def subscription(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"subscription\")", "def test_aws_service_api_validate_subscription_post(self):\n pass", "def subscriptions(self):\r\n return v3.Subscriptions(self)", "def subscriptions(self) -> pulumi.Output[Optional[Sequence['outputs.ResourceIdResponse']]]:\n return pulumi.get(self, \"subscriptions\")", "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def on_subscribe( client, userdata, mid, granted_qos ):\n logging.info( \"Topic successfully subcribed with QoS: %s\" %granted_qos )", "def post_create_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def subscribe(self, **subscription_request):\n return self.subscribe_impl(mode='subscribe', **subscription_request)", "def 
subscriptions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourceIdArgs']]]]:\n return pulumi.get(self, \"subscriptions\")", "def no_save_individual_subscription(sender, instance, **kwargs):\n try:\n Subscription.objects.get(pk=instance.pk) # looking if the subscription exist, if the case, we assume here is updating active status or email status\n except:\n if instance.user is not None:\n subs_ids = instance.user.groups.values_list('subscription')\n for sub in subs_ids:\n if None not in sub:\n alarm = Subscription.objects.get(id=sub[0]).alarm\n if alarm == instance.alarm:\n raise ValidationError('The user is subscribed to the same alarm for a group')\n\n subs = Subscription.objects.filter(user=instance.user)\n for sub in subs:\n if sub.alarm == instance.alarm:\n raise ValidationError('The user is subscribed to this alarm')", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_issue_add_subscription(self):\n pass", "def create_subscription(self,\n body):\n\n return super().new_api_call_builder.request(\n RequestBuilder().server('default')\n .path('/v2/subscriptions')\n .http_method(HttpMethodEnum.POST)\n .header_param(Parameter()\n .key('Content-Type')\n .value('application/json'))\n .body_param(Parameter()\n .value(body))\n .header_param(Parameter()\n .key('accept')\n .value('application/json'))\n .body_serializer(APIHelper.json_serialize)\n .auth(Single('global'))\n ).response(\n ResponseHandler()\n .deserializer(APIHelper.json_deserialize)\n .is_api_response(True)\n .convertor(ApiResponse.create)\n ).execute()", "def subscribe(self, inst):\r\n if inst not in self._subscribers:\r\n self._subscribers.append(inst)\r\n vprint(\"{} is subscribed to {}\".format(inst.name, self.name))", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def post(self):\n data = request.json\n return new_subscription(data=data)", "def test_update_subscription_template(self):\n pass", "def test_delete_subscription(self):\n pass", "def get_subscription(self):\n if not hasattr(self, '_subscription'):\n self._subscription = self.admin.subscriptions.select_related('plan').get_overlapping(\n self.admin_id, DateRange(self.period, self.period_end, bounds='[]'))\n return self._subscription", "def get_subscriptions(self):\n return {}", "def subscribe(self):\n res = self._subscribe()\n if res is not None:\n self._subscribed = True\n return res", "def subscribe(receiver):", "def subscribe(receiver):", "def subscribe(receiver):", "def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def subscribe(self, subscription):\n try:\n if isinstance(subscription, Subscription):\n sub = Subscribe(subscription, self.__pool, self.myAddress)\n self.send(self.__pool, sub)\n except Exception:\n handle_actor_system_fail()", "def subscriptions(self):\r\n return subs.AccountSubscriptions(self)", "def set_subscription(self, value):\n self.pub_socket.setsockopt(zmq.SUBSCRIBE, value)", "def add_subscription(self):\n schema = schemas.load(schemas.Subscription, self.request)\n subscription = self.customer.add_subscription(**schema)\n self.request.db.flush()\n self.request.response.status_int = 201\n return {'abonnement': subscription}", "def subscribe(self):\n if not self._subscribed and self._connected:\n if ATTR_STREAM_ID not in self.data:\n msg = self._create_message(strings.SUB_MSG)\n self.write(msg)\n else:\n msg = 
self._create_message(strings.RESUB_MSG)\n self.write(msg)\n self._subscribed = True", "def to_subscription_instance_10(self):\n delivery_params = None # TODO: Implement this\n poll_instances = None # TODO: Implement this\n\n si = tm10.SubscriptionInstance(subscription_id=str(self.subscription_id),\n delivery_parameters=delivery_params,\n poll_instances=poll_instances)\n return si", "def subscription(self) -> SubscriptionServiceProxy:\n if self._subscription_services is None:\n self._subscription_services = SubscriptionServiceProxy(self)\n return self._subscription_services", "def get_subscriptions(self) -> Iterator[\"Subscription\"]:\n yield from self._subscriptions[self.id]", "def post_list_subscriptions(\n self, response: pubsub.ListSubscriptionsResponse\n ) -> pubsub.ListSubscriptionsResponse:\n return response", "def subscribe(self, request: Request) -> Response:\n ids = request.data.get(\"ids\", None)\n session_id = request.data.get(\"session_id\")\n content_type = ContentType.objects.get_for_model(self.get_queryset().model)\n user = request.user if request.user.is_authenticated else get_anonymous_user()\n subscription = Subscription.objects.create(user=user, session_id=session_id)\n\n if ids is None:\n # Subscribe to the whole table.\n subscription.subscribe(\n content_type, [Observer.ALL_IDS], (ChangeType.CREATE, ChangeType.DELETE)\n )\n else:\n # Verify all ids exists and user has permissions to view them.\n for id in ids:\n if not self.user_has_permission(id, request.user):\n raise NotFound(f\"Item {id} does not exist\")\n\n change_types = (ChangeType.UPDATE, ChangeType.DELETE, ChangeType.CREATE)\n subscription.subscribe(content_type, ids, change_types)\n\n resp = {\"subscription_id\": subscription.subscription_id}\n return Response(resp)", "def _pause_subscription(self):\n return {}", "def add_subscription(self, query):\n key = query.key()\n if key not in self.subscriptions:\n self.subscriptions += [key]\n self.put()", "def test_subscribe(self):\n self.service.clientConnected()\n self.service.subscribe(u'url', None)\n pubsubClient = self.service.pubsubClient\n self.assertIn(u'url', pubsubClient.subscriptions)", "def test_list_template_subscriptions(self):\n pass", "def test_update_template_subscription(self):\n pass", "def get_subscription_id(self):\n return self.instance_metadata.subscription_id", "def _async_track_subscription(self, subscription: Subscription) -> None:\n if _is_simple_match(subscription.topic):\n self._simple_subscriptions.setdefault(subscription.topic, []).append(\n subscription\n )\n else:\n self._wildcard_subscriptions.append(subscription)", "def subscriptions(self):\n if not hasattr(self, '_subscriptions'):\n subscriptions_resource = self.resource.subscriptions\n self._subscriptions = Subscriptions(\n subscriptions_resource, self.client)\n return self._subscriptions", "def getAllSubscriptions(self):\n return self.request(\n \"getAllSubscriptions\",\n )", "def test_get_template_subscription(self):\n pass", "def list_subscriptions(self):\n return {'abonnementen': self.customer.abonnementen}", "def GetSubscription(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_subscriptions(self):\n return self.subscriptions.all()", "def _unsubscribe(self):\n if not self.subscribed:\n return False\n return {}", "def toggle_subscription(self):\n user = self.context['request'].user\n # pylint: disable=no-member\n profile = 
UserProfile.objects.get(\n user=user)\n club = self.context['club']\n\n if club in profile.subscriptions.all():\n club.subscribed_users.remove(profile)\n else:\n club.subscribed_users.add(profile)", "def __call__(\n self,\n request: pubsub.Subscription,\n *,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Optional[float] = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> pubsub.Subscription:\n\n http_options: List[Dict[str, str]] = [\n {\n \"method\": \"put\",\n \"uri\": \"/v1/{name=projects/*/subscriptions/*}\",\n \"body\": \"*\",\n },\n ]\n request, metadata = self._interceptor.pre_create_subscription(\n request, metadata\n )\n pb_request = pubsub.Subscription.pb(request)\n transcoded_request = path_template.transcode(http_options, pb_request)\n\n # Jsonify the request body\n\n body = json_format.MessageToJson(\n transcoded_request[\"body\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n uri = transcoded_request[\"uri\"]\n method = transcoded_request[\"method\"]\n\n # Jsonify the query params\n query_params = json.loads(\n json_format.MessageToJson(\n transcoded_request[\"query_params\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n )\n query_params.update(self._get_unset_required_fields(query_params))\n\n query_params[\"$alt\"] = \"json;enum-encoding=int\"\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = getattr(self._session, method)(\n \"{host}{uri}\".format(host=self._host, uri=uri),\n timeout=timeout,\n headers=headers,\n params=rest_helpers.flatten_query_params(query_params, strict=True),\n data=body,\n )\n\n # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)\n\n # Return the response\n resp = pubsub.Subscription()\n pb_resp = pubsub.Subscription.pb(resp)\n\n json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)\n resp = self._interceptor.post_create_subscription(resp)\n return resp", "def _create_sub(name, rostype, topic_callback, *args, **kwargs):\n # counting subscriber instance per topic name\n if name in TopicBack.sub_instance_count.keys():\n TopicBack.sub_instance_count[name] += 1\n else:\n TopicBack.sub_instance_count[name] = 1\n\n return rospy.Subscriber(name, rostype, topic_callback, *args, **kwargs)", "def delete_individual_subscriptions_for_grupal_subscription(sender, instance, **kwargs):\n if instance.group is not None: # Only for group subscription creation\n users = User.objects.filter(groups__name=instance.group)\n subs = Subscription.objects.filter(user__in=users)\n for sub in subs:\n if sub.alarm == instance.alarm:\n print('%s deleted' % sub)\n sub.delete()", "def test_create_subscription_template(self):\n pass", "def test_issue_delete_subscription(self):\n pass", "def to_subscription_instance_11(self):\n subscription_params = tm11.SubscriptionParameters(response_type=self.response_type,\n content_bindings=[str(x) for x in\n self.supported_content.all()])\n\n if self.query:\n subscription_params.query = self.query.to_query_11()\n\n push_params = None # TODO: Implement this\n poll_instances = None # TODO: Implement this\n si = tm11.SubscriptionInstance(subscription_id=str(self.subscription_id),\n status=self.status,\n subscription_parameters=subscription_params,\n push_parameters=push_params,\n poll_instances=poll_instances)\n return si", "def subscribe(self, 
item_name):\n if item_name == ITEM_NAME:\n self.subscribed = item_name\n else:\n # Only one item for a unique chat room is managed.\n raise SubscribeError(\"No such item\")", "def __init__(self, data):\n super(OptionsChainSubscriptionCreate, self).__init__()\n self.data = data", "def test_get_subscription_template(self):\n pass", "def enable_subscription():\n client = KConsumer(config=subscriber_config)\n counter = 0\n while 1:\n data = client.consume()\n if data:\n print(\"Received Data\", counter)\n class_label = inference_on_data(data.value)\n publish_response(class_label)", "def subscribeConsumer(consumer):", "def __init__(self, ContextId, ReferenceId, data):\n super(OptionsChainSubscriptionModify, self).__init__(\n ReferenceId=ReferenceId,\n ContextId=ContextId)\n self.data = data", "def getSubscriptions(self):\n\n address = self.getAddress()\n if address is None:\n return []\n else:\n return [\n \"shellies/announce\",\n \"{}/online\".format(address),\n \"{}/emeter/{}/energy\".format(address, self.getChannel()),\n \"{}/emeter/{}/returned_energy\".format(address, self.getChannel()),\n \"{}/emeter/{}/power\".format(address, self.getChannel()),\n \"{}/emeter/{}/reactive_power\".format(address, self.getChannel()),\n \"{}/emeter/{}/voltage\".format(address, self.getChannel()),\n \"{}/emeter/{}/total\".format(address, self.getChannel()),\n \"{}/emeter/{}/total_returned\".format(address, self.getChannel())\n ]", "def subscribe_command(shared, chat, message, args):\n subs = shared[\"subs\"]\n subs.append(chat.id)\n shared[\"subs\"] = subs", "def __init__(self, ContextId, ReferenceId):\n super(OptionsChainSubscriptionRemove, self).__init__(\n ReferenceId=ReferenceId,\n ContextId=ContextId)", "def _subscribe(self, sub_type: str, sub_version: str, condition: dict, callback) -> str:\n self.__logger.debug(f'subscribe to {sub_type} version {sub_version} with condition {condition}')\n data = {\n 'type': sub_type,\n 'version': sub_version,\n 'condition': condition,\n 'transport': {\n 'method': 'webhook',\n 'callback': f'{self.callback_url}/callback',\n 'secret': self.secret\n }\n }\n r_data = self.__api_post_request(TWITCH_API_BASE_URL + 'eventsub/subscriptions', data=data)\n result = r_data.json()\n error = result.get('error')\n if r_data.status_code == 500:\n raise TwitchBackendException(error)\n if error is not None:\n if error.lower() == 'conflict':\n raise EventSubSubscriptionConflict(result.get('message', ''))\n raise EventSubSubscriptionError(result.get('message'))\n sub_id = result['data'][0]['id']\n self.__add_callback(sub_id, callback)\n if self.wait_for_subscription_confirm:\n timeout = datetime.datetime.utcnow() + datetime.timedelta(\n seconds=self.wait_for_subscription_confirm_timeout)\n while timeout >= datetime.datetime.utcnow():\n if self.__callbacks[sub_id]['active']:\n return sub_id\n asyncio.get_event_loop().run_until_complete(asyncio.sleep(0.01))\n self.__callbacks.pop(sub_id, None)\n raise EventSubSubscriptionTimeout()\n return sub_id", "def subscribePost() -> object:\n log = logging.getLogger(__name__)\n db = Db()\n\n body = request.get_json()\n\n if body is None:\n return jsonify({\"error\": \"json body is required\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n if not('datasetId') in body:\n return jsonify({\"error\": \"datasetId is a required attribute\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n if not('notificationUrl') in body:\n return jsonify({\"error\": \"notificationUrl is a required attribute\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n\n subscription = db.Subscriptions(\n 
datasetId=body['datasetId'],\n notificationUrl=body['notificationUrl']\n )\n\n subscription.save()\n\n subscription = json.loads(subscription.to_json())\n subscription['id'] = subscription['_id'][\"$oid\"]\n subscription.pop(\"_id\")\n log.debug(\"subscription created\")\n\n return jsonify(subscription), HTTPStatus.CREATED", "def subscription(self, subscription):\n if subscription is None:\n raise ValueError(\"Invalid value for `subscription`, must not be `None`\")\n\n self._subscription = subscription", "def pause_subscription(self):\n res = self._pause_subscription()\n self._subscribed = not self.subscribed\n return res", "def test_list_pending_template_subscriptions(self):\n pass", "def subscribe(receiver, updateInterval=10):", "def getsubscriptions(self):\n subs = {}\n for sub in self._subscriptions.values():\n subs[sub.ID] = sub.asTuple()\n return subs", "def get_subscriptions_to_self(self):\n return self._roster.get_my_subscribers()\n return self._roster.get_my_subscribers()", "def test_existing_subscriptions_autosubscription(self) -> None:\n stream_name = \"new_public_stream\"\n cordelia = self.example_user(\"cordelia\")\n self.common_subscribe_to_streams(cordelia, [stream_name], invite_only=False)\n result = self.client_post(\n \"/json/subscriptions/exists\", {\"stream\": stream_name, \"autosubscribe\": \"false\"}\n )\n response_dict = self.assert_json_success(result)\n self.assertIn(\"subscribed\", response_dict)\n self.assertFalse(response_dict[\"subscribed\"])\n\n result = self.client_post(\n \"/json/subscriptions/exists\", {\"stream\": stream_name, \"autosubscribe\": \"true\"}\n )\n response_dict = self.assert_json_success(result)\n self.assertIn(\"subscribed\", response_dict)\n self.assertTrue(response_dict)" ]
[ "0.6767392", "0.672459", "0.6712795", "0.66163754", "0.659562", "0.6585582", "0.6523138", "0.6443233", "0.64327395", "0.63420546", "0.6317172", "0.6317172", "0.6311791", "0.6272946", "0.62651706", "0.623661", "0.6234481", "0.62003255", "0.61978865", "0.61763334", "0.6157788", "0.61439747", "0.61425656", "0.611801", "0.60908514", "0.6046276", "0.6038657", "0.6038657", "0.6009132", "0.6007158", "0.6003454", "0.5973642", "0.597068", "0.59394664", "0.59223974", "0.58971477", "0.588576", "0.58791244", "0.58758944", "0.5865233", "0.58518696", "0.5842229", "0.58422184", "0.5841435", "0.58288974", "0.5820687", "0.58134615", "0.579945", "0.5791124", "0.5791124", "0.5791124", "0.57893735", "0.5767502", "0.5760433", "0.575773", "0.5749892", "0.5745516", "0.57154554", "0.56971544", "0.56968284", "0.56943005", "0.5684036", "0.5661033", "0.5660045", "0.5640525", "0.5619953", "0.5611931", "0.5611749", "0.5599006", "0.55963916", "0.5596389", "0.55936116", "0.55907357", "0.5586081", "0.5585373", "0.5577829", "0.5570555", "0.5562918", "0.55378914", "0.5512908", "0.5509801", "0.5498297", "0.54666364", "0.5457887", "0.5454401", "0.5451905", "0.54399776", "0.5438099", "0.5433395", "0.54212356", "0.5406004", "0.54027253", "0.539961", "0.53986406", "0.53941923", "0.53938574", "0.5387973", "0.53744996", "0.53727114", "0.537074", "0.53703344" ]
0.0
-1
This operation is applicable only to general-purpose local-disk and dedicated local-disk instances. This operation depends on the audit log feature of ApsaraDB for MongoDB. You can enable the audit log feature based on your business needs. For more information, see [Enable the audit log feature](~~59903~~). Starting from January 6, 2022, the official edition of the audit log feature has been available in all regions, and new applications for the free trial edition are no longer accepted. For more information, see [Notice on official launch of the pay-as-you-go audit log feature and no more application for the free trial edition](~~377480~~).
def describe_mongo_dblog_config_with_options( self, request: dds_20151201_models.DescribeMongoDBLogConfigRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.DescribeMongoDBLogConfigResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='DescribeMongoDBLogConfig', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.DescribeMongoDBLogConfigResponse(), self.call_api(params, req, runtime) )
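The method above follows the same request/response pattern as the other operations in this client. The sketch below is a rough illustration of how a caller might invoke it; the package and module names are assumed to follow the standard alibabacloud_dds20151201 SDK layout, and the endpoint, credentials, and instance ID are placeholders rather than values taken from this file.

# Minimal usage sketch (assumptions: standard alibabacloud_dds20151201 package
# layout, central MongoDB API endpoint, hypothetical instance ID).
from alibabacloud_dds20151201.client import Client
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util import models as util_models

# Build the OpenAPI config with caller-supplied credentials.
config = open_api_models.Config(
    access_key_id='<your-access-key-id>',
    access_key_secret='<your-access-key-secret>',
)
config.endpoint = 'mongodb.aliyuncs.com'  # assumed endpoint; adjust per region

client = Client(config)

# Query the log configuration of the specified instance.
request = dds_20151201_models.DescribeMongoDBLogConfigRequest(
    dbinstance_id='dds-bp1xxxxxxxxxxxxx'  # hypothetical instance ID
)
runtime = util_models.RuntimeOptions()
response = client.describe_mongo_dblog_config_with_options(request, runtime)
print(response.body)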
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def enable_audit_monitoring():\n __enable_data_access_logging()\n __enable_log_streaming()\n __create_audit_alerts()\n __get_incidents_history()", "def __enable_data_access_logging():\n _tempFile = \"tmp_audit_config.json\"\n\n auditConfig = {\n \"auditConfigs\": [\n {\n \"auditLogConfigs\": [\n {\n \"logType\": \"ADMIN_READ\"\n },\n {\n \"logType\": \"DATA_WRITE\"\n },\n {\n \"logType\": \"DATA_READ\"\n }\n ],\n \"service\": \"allServices\",\n }\n ]\n }\n\n # get current policy\n run_command('gcloud projects get-iam-policy {} --format=json >>{}'.format(PROJECT_ID, _tempFile))\n\n # merge it with above-defined config\n merge_JSON(auditConfig, _tempFile)\n\n # set the policy\n run_command('gcloud projects set-iam-policy {} {}'.format(PROJECT_ID, _tempFile))\n\n # delete the temp file\n run_command('rm {}'.format(_tempFile))", "def add_audit(self, entity_name, object_name, operation,\n data, auth_ctx, session):", "def test_mongodb_oplog_origin(sdc_builder, sdc_executor, mongodb):\n pipeline_builder = sdc_builder.get_pipeline_builder()\n pipeline_builder.add_error_stage('Discard')\n\n time_now = int(time.time())\n mongodb_oplog = pipeline_builder.add_stage('MongoDB Oplog')\n database_name = get_random_string(ascii_letters, 10)\n # Specify that MongoDB Oplog needs to read changes occuring after time_now.\n mongodb_oplog.set_attributes(collection='oplog.rs', initial_timestamp_in_secs=time_now, initial_ordinal=1)\n\n trash = pipeline_builder.add_stage('Trash')\n mongodb_oplog >> trash\n pipeline = pipeline_builder.build().configure_for_environment(mongodb)\n\n try:\n # Insert documents in MongoDB using PyMongo.\n # First a database is created. Then a collection is created inside that database.\n # Then documents are inserted in that collection.\n mongodb_database = mongodb.engine[database_name]\n mongodb_collection = mongodb_database[get_random_string(ascii_letters, 10)]\n input_rec_count = 6\n inserted_list = mongodb_collection.insert_many([{'x': i} for i in range(input_rec_count)])\n assert len(inserted_list.inserted_ids) == input_rec_count\n\n sdc_executor.add_pipeline(pipeline)\n snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot\n sdc_executor.stop_pipeline(pipeline)\n\n assert len(snapshot[mongodb_oplog].output) == input_rec_count\n for record in list(enumerate(snapshot[mongodb_oplog].output)):\n assert record[1].value['value']['o']['value']['x']['value'] == str(record[0])\n # Verify the operation type is 'i' which is for 'insert' since we inserted the records earlier.\n assert record[1].value['value']['op']['value'] == 'i'\n assert record[1].value['value']['ts']['value']['timestamp']['value'] > time_now\n\n finally:\n logger.info('Dropping %s database...', database_name)\n mongodb.engine.drop_database(database_name)", "def test_otoroshi_controllers_adminapi_events_controller_audit_events(self):\n pass", "def log(cmdDict, kind, error=None):\n\n cmdDict['timestamp'] = str(int(time.time() * 1000))\n cmdDict['logType'] = kind\n cmdDict['_id'] = cmdDict['transactionNumber'] + cmdDict['user'] + cmdDict['command'] + cmdDict['timestamp'] + cmdDict['logType']\n\n if (error != None):\n cmdDict['errorMessage'] = error\n\n try:\n Database.insert(TRANSACT_COLLECT, cmdDict)\n\n except pymongo.errors.PyMongoError as err:\n print(f\"ERROR! Could not log command into database. 
Failed with: {err}\")", "def get_audit(self, query, session):\n raise NotImplementedError()", "def test_old_access(self):\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('username', 'keyid', 'Active', created, last_used)\n key.audit(60, 80, 10, 9)\n assert key.audit_state == 'stagnant_expire'", "def MongoLog(self, request_number, process, log_message):\n try:\n print(\"Attempting to connect to MongoDB...\")\n client = MongoClient('localhost', 27017)\n db = client.database\n collection = db.logging_database\n\n status_log = {\"Request_No\": request_number, \"Brewing_Process\": process, \"Log_Message\": log_message,\n \"Time\": datetime.datetime.now()}\n\n try:\n collection.insert_one(status_log)\n except TypeError: # Error Handling for MongoDB versions that do not implement insert_one() method\n collection.insert(status_log)\n\n print(status_log)\n except Exception as e:\n print(\"MongoDB connection Error:\" + str(e))", "def disable_disk_logging():\r\n app.set_option(_DISK_LOG_LEVEL_OPTION, LogOptions._LOG_LEVEL_NONE_KEY, force=True)", "def view_audit_log(_) -> int:\n return 1 << 7", "def view_audit_log(_) -> int:\n return 1 << 7", "def test_normal(self):\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n k = Key('username', 'keyid', 'Active', created, last_used)\n k.audit(60, 80, 20, 19)\n assert k.creation_age == 15\n assert k.audit_state == 'good'", "def check_oplog(con, warning, critical, perf_data):\n warning = warning or 24 \n critical = critical or 4\n try:\n db = con.local\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) \n if (db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) != None) :\n oplog = \"oplog.rs\";\n else :\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"})\n if (db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"}) != None) :\n oplog = \"oplog.$main\";\n else :\n message = \"neither master/slave nor replica set replication detected\";\n return check_levels(None,warning,critical,message)\n\n try:\n set_read_preference(con.admin)\n data=con.local.command(pymongo.son_manipulator.SON([('collstats',oplog)]))\n except:\n data = con.admin.command(son.SON([('collstats',oplog)]))\n\n ol_size=data['size']\n ol_storage_size=data['storageSize']\n ol_used_storage=int(float(ol_size)/ol_storage_size*100+1)\n ol=con.local[oplog]\n firstc = ol.find().sort(\"$natural\",pymongo.ASCENDING).limit(1)[0]['ts']\n lastc = ol.find().sort(\"$natural\",pymongo.DESCENDING).limit(1)[0]['ts']\n time_in_oplog= (lastc.as_datetime()-firstc.as_datetime())\n message=\"Oplog saves \"+ str(time_in_oplog) + \" %d%% used\" %ol_used_storage \n try: #work starting from python2.7\n hours_in_oplog= time_in_oplog.total_seconds()/60/60\n except:\n hours_in_oplog= float(time_in_oplog.seconds + time_in_oplog.days * 24 * 3600)/60/60\n approx_level=hours_in_oplog*100/ol_used_storage\n message+=performance_data(perf_data,[(\"%.2f\" % hours_in_oplog,'oplog_time',warning,critical),(\"%.2f \" % approx_level, 'oplog_time_100_percent_used')])\n return check_levels(-approx_level,-warning,-critical,message)\n\n except Exception, e:\n return exit_with_general_critical(e)", "def log(msg):\n\n print('datastore: %s' % msg)", "def AddSqlServerAudit(parser):\n parser.add_argument(\n '--audit-bucket-path',\n required=False,\n help=(\n 'The location, as a Cloud Storage 
bucket, to which audit files are '\n 'uploaded. The URI is in the form gs://bucketName/folderName. Only '\n 'available for SQL Server instances.'\n ),\n )\n\n parser.add_argument(\n '--audit-retention-interval',\n default=None,\n type=arg_parsers.Duration(upper_bound='7d'),\n required=False,\n help=(\n 'The number of days for audit log retention on disk, for example, 3d'\n 'for 3 days. Only available for SQL Server instances.'\n ),\n )\n\n parser.add_argument(\n '--audit-upload-interval',\n default=None,\n type=arg_parsers.Duration(upper_bound='720m'),\n required=False,\n help=(\n 'How often to upload audit logs (audit files), for example, 30m'\n 'for 30 minutes. Only available for SQL Server instances.'\n ),\n )", "def setup_tap_mongodb(self):\n db_script = os.path.join(DIR, '..', '..', 'db', 'tap_mongodb.sh')\n self._run_command(db_script)", "def logs(self, container: Container) -> str:", "def add_log4mongo():\n remove_log4mongo()\n root = logging.getLogger()\n root.addHandler(MongoHandler())", "def _add_connection_info(report_kvs, db):\n report_kvs['Flavor'] = 'mongodb'\n report_kvs['Database'] = db.name\n report_kvs['RemoteHost'] = db.connection.host\n report_kvs['RemotePort'] = db.connection.port", "def log_db():\n return pymongo.MongoClient(SCITRAN_PERSISTENT_DB_LOG_URI).get_database()", "def update_log(ident, document):\n logs_col.update_one({\"_id\": ident}, {\"$set\": document})", "def test_getAuditLogsWithNoParams(self):\r\n logs = self.client.getAuditLogs()\r\n return logs", "def test_host_file_audit(host):\n with host.sudo():\n host.run(\"touch /etc/hosts\")\n audit_log = host.run(\"journalctl -u auditd --since \\\"10 seconds ago\\\" | grep \\\"/etc/hosts\\\"\")\n assert audit_log.stdout", "def audit(msg):\n global auditLogger\n if auditLogger is not None:\n auditLogger.info(msg)", "def get_log(ident):\n return logs_col.find_one({\"_id\": ObjectId(ident)})", "def audit(audit_code, audit_str, request, system_initiated=False):\n try:\n db_path, err = config.get_db_path()\n if err:\n raise Exception(err)\n if system_initiated is False:\n ip, err = networking.get_client_ip(request.META)\n if err:\n raise Exception(err)\n now, err = datetime_utils.get_epoch(when='now', num_previous_days=0)\n if err:\n raise Exception(er)\n if system_initiated:\n username = 'System'\n source_ip = 'System'\n else:\n username = request.user.username\n source_ip, err = networking.get_client_ip(request.META)\n if err:\n raise Exception(err)\n command_list = []\n cmd = [\n 'insert into audit(audit_time, username, source_ip, audit_code, audit_str) values (?,?,?,?,?)', (now, username, source_ip, audit_code, audit_str,)]\n command_list.append(cmd)\n audit_id, err = db.execute_iud(db_path, command_list, get_rowid=True)\n if err:\n raise Exception(err)\n ret, err = event_notifications.record_event_notification_holding(\n event_id=audit_id, event_type_id=2)\n if err:\n raise Exception(err)\n\n except Exception, e:\n return False, 'Error performing an audit operation : %s' % str(e)\n else:\n return True, None", "def event_log(self):\n pass", "def audit_log(self, account_id):\n from pureport_client.commands.accounts.audit_log import Command\n return Command(self.client, account_id)", "def disk_log_level():\r\n if LogOptions._DISK_LOG_LEVEL is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_LEVEL", "def upload_logs():\n return {\n 'page': 'upload_logs',\n 'raw_logs': '',\n }", "def test_mongo_logging_client_persists_log():\n\n 
error_message = \"This is a test message.\"\n logger = LoggingService(console_output=True)\n\n result = logger.log(LogEntry(LogLevel.ERROR, __name__, error_message))\n logger.log(LogEntry(LogLevel.WARN, __name__, error_message))\n logger.log(LogEntry(LogLevel.INFO, __name__, error_message))\n logger.log(LogEntry(LogLevel.DEBUG, __name__, error_message))\n\n assert result.message == error_message", "def to(cls, database=\"AUDIT\", collection=\"log\",\n mongodb_uri=\"mongodb://localhost:27017\", level=logging.NOTSET):\n return cls(mongodb_uri, database, collection, level)", "def dump_to_log(self):\n # self._send_request(\"/dumpToLog\")\n pass", "def audit(self):\n self.ping()", "def db_for_write(self, model, **hints):\n if model == FilterRecordingTracking:\n return 'db_rest_api'\n return None", "async def addAudit(self, name, description, status, type, data, userid) -> CreateAuditResponse:\n return await self.stub.CreateAudit(\n CreateAuditRequest(name=name,\n description=description, type=type, status=status, data=data, created_by=userid\n ))", "async def audit_actions(self, ctx: Context) -> None:\n\n if ctx.invoked_subcommand is None:\n await ctx.send_help('auditaction')", "def electrolytedb():\n if not check_for_mongodb():\n pytest.skip(\"MongoDB is required\")", "def setup_logging():\n log.setup('keystone')", "def dwl_auditlog_entry_report(session):\n url = session.get_url('audit', 'dwl')\n\n req = re.Request('GET', url)\n\n return session.send_recv(req, 'Audit log entry report downloaded.')", "def logQuote(cmdDict):\n\n try:\n Database.insert(TRANSACT_COLLECT, cmdDict)\n\n except pymongo.errors.PyMongoError as err:\n print(f\"ERROR! Could not log command into database. Failed with: {err}\")", "def cf_log_analytics_data_plane(cli_ctx, _):\n from azure.loganalytics import LogAnalyticsDataClient\n from azure.cli.core._profile import Profile\n profile = Profile(cli_ctx=cli_ctx)\n cred, _, _ = profile.get_login_credentials(\n resource=cli_ctx.cloud.endpoints.log_analytics_resource_id)\n api_version = 'v1'\n return LogAnalyticsDataClient(cred,\n base_url=cli_ctx.cloud.endpoints.log_analytics_resource_id + '/' + api_version)", "def test_disabled(self, monkeypatch):\n monkeypatch.setenv('ENABLE_AUTO_EXPIRE', 'true')\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('user2', 'ldasfkk', 'Inactive', created, last_used)\n key.audit(10, 11, 10, 8)\n assert key.audit_state == 'disabled'\n key.audit(60, 80, 30, 20)\n assert key.audit_state == 'disabled'", "def test_aud_from_log_ignores_index():\n assert True", "def main():\n\n args = cli.get_args()\n\n audit = {}\n try:\n service_instance,content = connect_vc(args.host,args.user,args.password,args.port)\n\n if sys.stdout.isatty():\n print(\"vCenter: %s\" % args.host)\n \n content = service_instance.RetrieveContent()\n\n container = content.rootFolder # starting point to look into\n datacenters = get_datacenters(content)\n for dc in datacenters:\n datacenters[dc]['clusters'] = get_clusters(datacenters[dc]['dc'])\n\n datacenters[dc]['vms'] = get_vms(datacenters[dc]['dc'].vmFolder)\n \n get_nets(dc)\n get_dstores(dc)\n\n vmcount=0\n \n for dc in datacenters:\n for vm in sorted(datacenters[dc]['vms'],key=lambda s: s.lower()):\n vmcount+=1\n v = datacenters[dc]['vms'][vm]\n c = find_cluster(datacenters[dc]['clusters'],v.runtime.host.name)\n vort = \"Template\" if v.summary.config.template == True else \"VM\"\n audit[v.name]={}\n 
audit[v.name]['datacenter'] = dc\n audit[v.name]['cluster'] = c\n audit[v.name]['type'] = vort\n audit[v.name]['hostname'] = v.summary.guest.hostName\n audit[v.name]['guestid'] = v.config.guestId\n audit[v.name]['fullname'] = v.summary.config.guestFullName\n audit[v.name]['state'] = v.runtime.powerState\n audit[v.name]['ip'] = v.guest.ipAddress\n if sys.stdout.isatty():\n print(vmcount,\"Guests processed\",end='\\r')\n sys.stdout.flush()\n# print(\"%-15s:%-10s %-8s %-30s %-30s %s %s %s %s\" % (dc, c, vort,v.name,v.summary.guest.hostName, v.config.guestId, v.summary.config.guestFullName,v.guest.guestState,v.guest.ipAddress ))\n #print vort, v.name, v.summary.guest.hostName, v.guest.guestId, v.summary.config.guestFullName,v.guest.guestState,v.guest.ipAddress #,v.summary\n# print(\"\\ncount:\",vmcount)\n \n print(json.dumps(audit, indent=4, separators=(',', ': ')))\n \n except vmodl.MethodFault as error:\n print(\"Caught vmodl fault : \" + error.msg)\n return -1\n\n return 0", "def get_logdb_quota():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<show><system><logdb-quota></logdb-quota></system></show>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def test_mongodb_origin_simple_with_BSONBinary(sdc_builder, sdc_executor, mongodb):\n\n ORIG_BINARY_DOCS = [\n {'data': binary.Binary(b'Binary Data Flute')},\n {'data': binary.Binary(b'Binary Data Oboe')},\n {'data': binary.Binary(b'Binary Data Violin')}\n ]\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n pipeline_builder.add_error_stage('Discard')\n\n mongodb_origin = pipeline_builder.add_stage('MongoDB', type='origin')\n mongodb_origin.set_attributes(capped_collection=False,\n database=get_random_string(ascii_letters, 5),\n collection=get_random_string(ascii_letters, 10))\n\n trash = pipeline_builder.add_stage('Trash')\n mongodb_origin >> trash\n pipeline = pipeline_builder.build().configure_for_environment(mongodb)\n\n try:\n # MongoDB and PyMongo add '_id' to the dictionary entries e.g. docs_in_database\n # when used for inserting in collection. Hence the deep copy.\n docs_in_database = copy.deepcopy(ORIG_BINARY_DOCS)\n\n # Create documents in MongoDB using PyMongo.\n # First a database is created. 
Then a collection is created inside that database.\n # Then documents are created in that collection.\n logger.info('Adding documents into %s collection using PyMongo...', mongodb_origin.collection)\n mongodb_database = mongodb.engine[mongodb_origin.database]\n mongodb_collection = mongodb_database[mongodb_origin.collection]\n insert_list = [mongodb_collection.insert_one(doc) for doc in docs_in_database]\n assert len(insert_list) == len(docs_in_database)\n\n # Start pipeline and verify the documents using snaphot.\n sdc_executor.add_pipeline(pipeline)\n snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot\n sdc_executor.stop_pipeline(pipeline)\n rows_from_snapshot = [{'data': str(record.value2['data'])} for record in snapshot[mongodb_origin].output]\n\n assert rows_from_snapshot == [{'data': str(record.get('data'))} for record in ORIG_BINARY_DOCS]\n\n finally:\n logger.info('Dropping %s database...', mongodb_origin.database)\n mongodb.engine.drop_database(mongodb_origin.database)", "def add_log(self):\n self.stack = []\n diff = self.diff(self.original, self.doc)\n entry = {\"_id\": utils.get_iuid(),\n \"doctype\": constants.DOCTYPE_LOG,\n \"docid\": self.doc[\"_id\"],\n \"diff\": diff,\n \"timestamp\": utils.get_time()}\n self.modify_log_entry(entry)\n if hasattr(flask.g, \"current_user\") and flask.g.current_user:\n entry[\"username\"] = flask.g.current_user[\"username\"]\n else:\n entry[\"username\"] = None\n if flask.has_request_context():\n entry[\"remote_addr\"] = str(flask.request.remote_addr)\n entry[\"user_agent\"] = str(flask.request.user_agent)\n else:\n entry[\"remote_addr\"] = None\n entry[\"user_agent\"] = os.path.basename(sys.argv[0])\n flask.g.db.put(entry)", "def docker_logs():\n local('docker logs {} -f'.format(project_name))", "async def view_audit_actions(self, ctx: Context) -> None:\n\n assert ctx.guild is not None # handle by `cog_check`\n\n if logging_info := (await typed_retrieve_query(\n self.bot.database,\n int,\n 'SELECT BITS FROM LOGGING WHERE GUILD_ID=?',\n (ctx.guild.id,))\n ):\n await ctx.send(embed=build_actions_embed(LoggingActions.all_enabled_actions((logging_info[0]))))\n else:\n await ctx.send('You must first set an audit channel before viewing audit actions.'\n '\\n_See `auditactions setchannel` for more information._')", "def log_image(data_category, image_name, path=None, plot=None, **kwargs):\n # always, just print in logs\n log(logging.INFO, data_category, \"AML MetricImage({})\".format(image_name))\n if data_category == DataCategory.ONLY_PUBLIC_DATA:\n # if public, ask azureml to record (if azureml attached)\n run = AmlRunWrapper()\n run.setup(attach=True)\n run.log_image(image_name, path, plot, **kwargs)\n run.flush()", "def print_aldb(service):\n # For now this sends logs to the log file.\n # Future direction is to create an INSTEON control panel.\n entity_id = service.data[CONF_ENTITY_ID]\n signal = f\"{entity_id}_{SIGNAL_PRINT_ALDB}\"\n dispatcher_send(hass, signal)", "def install_mongo ( vpc_conn, ec2_conn, cloudwatch_conn, vpc, base_name, aws_account_type, base_topicarn, params ):\n app_type = 'MONGO'\n mongo_keypair = get_keypair_name( aws_account_type, vpc.region.name, app_type )\n print mongo_keypair\n mongo_sec_grp_name = get_secgrp_name( base_name, app_type )\n \n print \"mongo_sec_grp_name\" + mongo_sec_grp_name\n \n mongo_sec_grp = find_secgrp(ec2_conn, mongo_sec_grp_name)\n print mongo_sec_grp\n \n if not mongo_sec_grp :\n mongo_sec_grp = create_secgrp( ec2_conn,\n vpc,\n mongo_sec_grp_name,\n 
'Controls access to the ' + app_type )\n print mongo_sec_grp \n \n remove_all_rules( ec2_conn, [ mongo_sec_grp ],deep = True,base_name = base_name)\n grant_ssh_access( ec2_conn, [ mongo_sec_grp ], find_group( ec2_conn, base_name, 'NAT' ) )\n grant_grp_self_access ( ec2_conn, mongo_sec_grp, 0, 65535, protocol = 'tcp' )\n \n mongo_ami_name = base_name + '-' + app_type\n \n mongo_ami = get_ami_by_name( ec2_conn, mongo_ami_name )\n if not mongo_ami :\n mongo_ami = get_ami_by_name( ec2_conn, default_mongo_ami )\n\n print \"Creating mongoDB Instances\"\n \n mongo_subnets = get_vpc_subnets( vpc_conn, vpc, 'STATIC' )\n mongo_sec_grps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ mongo_sec_grp_name ] } )\n \n primary_ip = get_primary_node(mongo_subnets)\n mongo_instances = []\n mongo_config = get_mongo_config(mongo_subnets)\n \n user_data = None\n primary_instance_id = None\n primary_subnet = None\n for subnet in mongo_subnets :\n cidr_block = subnet.cidr_block\n \n if cidr_block == primary_ip :\n primary_subnet = subnet\n \n # First launch the secondary instances\n if cidr_block != primary_ip :\n instance_private_ip = get_static_ip(subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo )\n\n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )\n\n user_data = base64.b64encode(mongo_config)\n \n print \"Launching primary mongo instance\"\n print \"primary_subnet \" +primary_subnet.cidr_block\n instance_private_ip = get_static_ip(primary_subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = primary_subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo)\n \n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )", "def create_sync_entry(ts, coll, idx):\n sync_log = connection.ElasticLogs()\n sync_log.ts = ts\n sync_log.coll = unicode(coll)\n sync_log.idx = unicode(idx)\n sync_log.save()\n return True", "def log_event(event):\n LOGGER.info(\"====================================================\")\n LOGGER.info(event)\n LOGGER.info(\"====================================================\")", "def enable_access_logging(self) -> Optional[bool]:\n return pulumi.get(self, \"enable_access_logging\")", "def modify_audit_log_filter_with_options(\n self,\n request: dds_20151201_models.ModifyAuditLogFilterRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ModifyAuditLogFilterResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.filter):\n query['Filter'] = request.filter\n if not 
UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_type):\n query['RoleType'] = request.role_type\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ModifyAuditLogFilter',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ModifyAuditLogFilterResponse(),\n self.call_api(params, req, runtime)\n )", "def __init__(self) -> None:\n name = \"Ensure that RDS Cluster audit logging is enabled for MySQL engine\"\n id = \"CKV_AWS_325\"\n supported_resources = (\"aws_rds_cluster\",)\n categories = (CheckCategories.LOGGING,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)", "def test_rotate_access(self):\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('username', 'keyid', 'Active', created, last_used)\n key.audit(60, 80, 20, 10)\n assert key.audit_state == 'stagnant'", "def test_read_namespaced_deployment_log_log(self):\n pass", "def test_resourcelog(client, test_database, session):\n test_database.refresh()\n\n usersession_id = 1\n remote_addr = \"127.0.0.1\"\n\n # Without payload\n r = client.get(\"/api/v1/config/\")\n statuscode = r.status_code\n response_size = int(r.headers.get(\"Content-Length\"))\n\n rlogs = session.query(log.ResourceLog).all()\n assert len(rlogs) == 2 # 2 entries since API did a login as first entry\n\n rl = rlogs[-1]\n assert rl.remote_addr == remote_addr\n assert rl.usersession_id == usersession_id\n assert rl.method == \"GET\"\n assert rl.resource == \"/api/v1/config/\"\n assert rl.statuscode == statuscode\n assert rl.response_size == response_size\n assert rl.payload is None\n assert rl.payload_size == 0\n assert rl.query == \"\"\n assert rl.duration > 0\n assert isinstance(rl.time, datetime.datetime)\n\n # With payload\n\n payload_data = {\n \"allele_ids\": [1],\n \"gp_name\": \"HBOCUTV\",\n \"gp_version\": \"v01\",\n \"referenceassessments\": [],\n }\n r = client.post(\"/api/v1/acmg/alleles/?dummy=data\", payload_data)\n payload = json.dumps(payload_data)\n payload_size = len(payload)\n statuscode = r.status_code\n response_size = int(r.headers.get(\"Content-Length\"))\n\n rlogs = session.query(log.ResourceLog).all()\n assert len(rlogs) == 4 # 4 since /currentuser is called to check whether logged in\n\n rl = rlogs[-1]\n assert statuscode == 200\n assert rl.remote_addr == remote_addr\n assert rl.usersession_id == usersession_id\n assert rl.method == \"POST\"\n assert rl.resource == \"/api/v1/acmg/alleles/\"\n assert rl.statuscode == statuscode\n assert rl.response_size == response_size\n assert rl.payload == payload\n assert rl.payload_size == payload_size\n assert rl.query == \"dummy=data\"\n assert rl.duration > 0\n assert isinstance(rl.time, datetime.datetime)\n\n # Make 
sure /login doesn't log passwords\n payload_data = {\"username\": \"abc\", \"password\": \"123\"}\n r = client.post(\"/api/v1/users/actions/login/\", payload_data)\n statuscode = r.status_code\n response_size = int(r.headers.get(\"Content-Length\"))\n\n rlogs = session.query(log.ResourceLog).all()\n assert len(rlogs) == 6 # 6 since /currentuser is called to check whether logged in\n\n rl = rlogs[-1]\n assert statuscode == 401 # User doesn't exist\n assert rl.remote_addr == remote_addr\n assert rl.usersession_id == usersession_id\n assert rl.method == \"POST\"\n assert rl.resource == \"/api/v1/users/actions/login/\"\n assert rl.statuscode == statuscode\n assert rl.response_size == response_size\n assert rl.payload is None\n assert rl.payload_size == 0\n assert rl.query == \"\"\n assert rl.duration > 0\n assert isinstance(rl.time, datetime.datetime)\n\n # Test logging when not logged in\n payload_data = {\n \"allele_ids\": [1],\n \"gp_name\": \"HBOCUTV\",\n \"gp_version\": \"v01\",\n \"referenceassessments\": [],\n }\n client.logout()\n r = client.post(\"/api/v1/acmg/alleles/?dummy=data\", payload_data, username=None)\n payload = json.dumps(payload_data)\n payload_size = len(payload)\n statuscode = r.status_code\n response_size = int(r.headers.get(\"Content-Length\"))\n\n rlogs = session.query(log.ResourceLog).all()\n assert len(rlogs) == 9 # logout counts as 1\n\n rl = rlogs[-1]\n assert statuscode == 403\n assert rl.remote_addr == remote_addr\n assert rl.usersession_id is None\n assert rl.method == \"POST\"\n assert rl.resource == \"/api/v1/acmg/alleles/\"\n assert rl.statuscode == statuscode\n assert rl.response_size == response_size\n assert rl.payload == payload\n assert rl.payload_size == payload_size\n assert rl.query == \"dummy=data\"\n assert isinstance(rl.time, datetime.datetime)", "def upgrade_to_21():\n\n def update_project_template(template):\n new_template = {'acquisitions': []}\n for a in template.get('acquisitions', []):\n new_a = {'minimum': a['minimum']}\n properties = a['schema']['properties']\n if 'measurement' in properties:\n m_req = properties['measurement']['pattern']\n m_req = re.sub('^\\(\\?i\\)', '', m_req)\n new_a['files']=[{'measurement': m_req, 'minimum': 1}]\n if 'label' in properties:\n l_req = properties['label']['pattern']\n l_req = re.sub('^\\(\\?i\\)', '', l_req)\n new_a['label'] = l_req\n new_template['acquisitions'].append(new_a)\n\n return new_template\n\n def dm_v2_updates(cont_list, cont_name):\n for container in cont_list:\n\n query = {'_id': container['_id']}\n update = {'$rename': {'metadata': 'info'}}\n\n if cont_name == 'projects' and container.get('template'):\n new_template = update_project_template(json.loads(container.get('template')))\n update['$set'] = {'template': new_template}\n\n\n if cont_name == 'sessions':\n update['$rename'].update({'subject.metadata': 'subject.info'})\n\n\n measurement = None\n modality = None\n info = None\n if cont_name == 'acquisitions':\n update['$unset'] = {'instrument': '', 'measurement': ''}\n measurement = container.get('measurement', None)\n modality = container.get('instrument', None)\n info = container.get('metadata', None)\n if info:\n config.db.acquisitions.update_one(query, {'$set': {'metadata': {}}})\n\n\n # From mongo docs: '$rename does not work if these fields are in array elements.'\n files = container.get('files')\n if files is not None:\n updated_files = []\n for file_ in files:\n file_['info'] = {}\n if 'metadata' in file_:\n file_['info'] = file_.pop('metadata', None)\n if 'instrument' in 
file_:\n file_['modality'] = file_.pop('instrument', None)\n if measurement:\n # Move the acquisition's measurement to all files\n if file_.get('measurements'):\n file_['measurements'].append(measurement)\n else:\n file_['measurements'] = [measurement]\n if info and file_.get('type', '') == 'dicom':\n # This is going to be the dicom header info\n updated_info = info\n updated_info.update(file_['info'])\n file_['info'] = updated_info\n if modality and not file_.get('modality'):\n file_['modality'] = modality\n\n updated_files.append(file_)\n if update.get('$set'):\n update['$set']['files'] = updated_files\n else:\n update['$set'] = {'files': updated_files}\n\n result = config.db[cont_name].update_one(query, update)\n\n query = {'$or':[{'files.metadata': { '$exists': True}},\n {'metadata': { '$exists': True}},\n {'files.instrument': { '$exists': True}}]}\n\n dm_v2_updates(config.db.collections.find(query), 'collections')\n\n query['$or'].append({'template': { '$exists': True}})\n dm_v2_updates(config.db.projects.find({}), 'projects')\n\n query['$or'].append({'subject': { '$exists': True}})\n dm_v2_updates(config.db.sessions.find(query), 'sessions')\n\n query['$or'].append({'instrument': { '$exists': True}})\n query['$or'].append({'measurement': { '$exists': True}})\n dm_v2_updates(config.db.acquisitions.find(query), 'acquisitions')", "def allow_map_to_audit(self):\n return self.audit_id is None and self.audit is None", "def test_get(self, init_db, audit):\n assert Audit.get(audit.id) == audit", "def search_log(doc_type, query):\n if query:\n body = {\n \"from\": 0,\n \"size\": 50,\n \"sort\": [\n {\n \"created_at\": {\n \"order\": \"desc\"\n }\n }\n ],\n \"query\": {\n \"term\": {\n \"_all\": \"the\"\n }\n }\n }\n else:\n body = {\n \"from\": 0,\n \"size\": 50,\n \"sort\": [\n {\n \"created_at\": {\n \"order\": \"desc\"\n }\n }\n ]\n }\n if doc_type:\n print \"condition 1 true\"\n res = es.search(index=\"logs\", doc_type=str(doc_type).strip(), body=body)\n else:\n res = es.search(index=\"logs\", body=body)\n\n data = []\n if not res.get('timed_out'):\n for item in res[\"hits\"][\"hits\"]:\n data.append({\n 'client_ip': item['_source'].get('client_ip'),\n 'client': item['_source'].get('client'),\n 'log': item['_source'].get('log'),\n 'service': item['_source'].get('service'),\n })\n response = {\"data\": data}\n return response", "def test_log_custom_logger(caplog, test_df):\n caplog.clear()\n\n logger_name = \"my_custom_logger\"\n\n my_logger = logging.getLogger(logger_name)\n\n @log_step(print_fn=my_logger.info)\n def do_nothing(df, *args, **kwargs):\n return df\n\n with caplog.at_level(logging.INFO):\n test_df.pipe(do_nothing)\n\n assert logger_name in caplog.text", "def test_Drive_log_count(opencl_env: cldrive_env.OpenCLEnvironment):\n logs = opencl_kernel_driver.Drive(\n \"\"\"\nkernel void A(global int* a, global int* b) {\n a[get_global_id(0)] += b[get_global_id(0)];\n}\n\"\"\",\n 16,\n 16,\n opencl_env,\n 5,\n )\n assert len(logs) == 5", "async def modlog(self, ctx):\r\n if ctx.invoked_subcommand is None:\r\n entity=self.bot.get_command(\"modlog\")\r\n p = await HelpPaginator.from_command(ctx, entity)\r\n await p.paginate()\r\n server = ctx.guild\r\n if str(server.id) not in self._logs:\r\n self._logs[str(server.id)] = {}\r\n dataIO.save_json(self._logs_file, self._logs)\r\n if \"channel\" not in self._logs[str(server.id)]:\r\n self._logs[str(server.id)][\"channel\"] = None\r\n dataIO.save_json(self._logs_file, self._logs)\r\n if \"toggle\" not in self._logs[str(server.id)]:\r\n 
self._logs[str(server.id)][\"toggle\"] = False\r\n dataIO.save_json(self._logs_file, self._logs)\r\n if \"case#\" not in self._logs[str(server.id)]:\r\n self._logs[str(server.id)][\"case#\"] = 0\r\n dataIO.save_json(self._logs_file, self._logs)\r\n if \"case\" not in self._logs[str(server.id)]:\r\n self._logs[str(server.id)][\"case\"] = {}\r\n dataIO.save_json(self._logs_file, self._logs)\r\n else:\r\n server = ctx.guild\r\n if str(server.id) not in self._logs:\r\n self._logs[str(server.id)] = {}\r\n dataIO.save_json(self._logs_file, self._logs)\r\n if \"channel\" not in self._logs[str(server.id)]:\r\n self._logs[str(server.id)][\"channel\"] = None\r\n dataIO.save_json(self._logs_file, self._logs)\r\n if \"toggle\" not in self._logs[str(server.id)]:\r\n self._logs[str(server.id)][\"toggle\"] = False\r\n dataIO.save_json(self._logs_file, self._logs)\r\n if \"case#\" not in self._logs[str(server.id)]:\r\n self._logs[str(server.id)][\"case#\"] = 0\r\n dataIO.save_json(self._logs_file, self._logs)\r\n if \"case\" not in self._logs[str(server.id)]:\r\n self._logs[str(server.id)][\"case\"] = {}\r\n dataIO.save_json(self._logs_file, self._logs)", "def on_a(self):\r\n self.log()", "def add_audit_cols(df, changedt):\n df = df.withColumn(\"operation\", f.lit(\"I\")) \\\n .withColumn(\"processeddate\", f.current_timestamp().cast(\"String\")) \\\n .withColumn(\"changedate\", f.lit(changedt)) \\\n .withColumn('changedate_year', f.year('changedate').cast(\"String\")) \\\n .withColumn('changedate_month', f.month('changedate').cast(\"String\")) \\\n .withColumn('changedate_day', f.dayofmonth('changedate').cast(\"String\"))\n return df", "def record(params, git_info = {}):\n print \"recording...\"\n\n try:\n # connect to MongoDB\n # config = json.load(open(os.environ.get('HOME') + \"/sandbox/config.json\"))\n config = json.load(open(os.environ.get('HOME') + \"/LSEMS/config.json\"))\n try:\n client = MongoClient(config[\"mongodb_url\"])\n except Exception as e:\n raise Exception(\"fail to connect to given MongoDB address: \" + DB_addr)\n\n # check and run the thing\n missing = checkKeys(params, ['data_set', 'src', 'type', 'param'])\n if len(missing) != 0:\n raise Exception(\"missing attribute\"+('s' if len(missing)!=1 else '')+\": \"+str(missing))\n\n params['time'] = asctime()\n params['commit_id'] = git_info['commit_id']\n params['name'] = git_info['name']\n repo_name = git_info['repo_name']\n params['repo_name'] = repo_name\n user = verifyUser(client, git_info['name'])\n\n exp = user.find_one({'exp_name': repo_name})\n if not exp:\n print 'adding new experiment '+repo_name+'...'\n user.insert({'exp_name': repo_name, 'exp_records':[]})\n old_records = user.find_one({'exp_name': repo_name})['exp_records']\n user.update({'exp_name': repo_name}, {'$set': {'exp_records': old_records + [params]}})\n\n print params\n #user.insert(params)\n client.close()\n return True,params\n except Exception as e:\n print e\n print \"Aborting...\"\n return False,{}", "def setup_logfile():\r\n from core.general.appinit import log_init\r\n log_init(\r\n 'general',\r\n 'django_api'\r\n )", "def command(ctx):\n ctx.setup_logger(format='')", "def update_audit_info(progress_controller=None):\n if progress_controller is None:\n progress_controller = ProgressControllerBase()\n progress_controller.maximum = 2\n\n from stalker.db.session import DBSession\n from stalker import LocalSession\n\n with DBSession.no_autoflush:\n local_session = LocalSession()\n logged_in_user = local_session.logged_in_user\n 
progress_controller.increment()\n\n if logged_in_user:\n # update the version updated_by\n from anima.dcc import mayaEnv\n\n m_env = mayaEnv.Maya()\n v = m_env.get_current_version()\n if v:\n v.updated_by = logged_in_user\n\n from stalker.db.session import DBSession\n\n DBSession.commit()\n progress_controller.increment()\n progress_controller.complete()", "def test_sudoers_audit(host):\n with host.sudo():\n sudoers_access = host.run(\"touch /etc/sudoers\")\n audit_log = host.run(\"journalctl -u auditd --since \\\"10 seconds ago\\\" | grep \\\"/etc/sudoers\\\"\")\n assert audit_log.stdout", "def sync_volumeaccessright_record( vac ):\n \n syndicate_caps = \"UNKNOWN\" # for exception handling\n \n # get arguments\n config = observer_core.get_config()\n principal_id = vac.owner_id.email\n volume_name = vac.volume.name\n syndicate_caps = observer_core.opencloud_caps_to_syndicate_caps( vac.cap_read_data, vac.cap_write_data, vac.cap_host_data ) \n \n logger.info( \"Sync VolumeAccessRight for (%s, %s)\" % (principal_id, volume_name) )\n \n # validate config\n try:\n observer_secret = observer_core.get_syndicate_observer_secret( config.SYNDICATE_OBSERVER_SECRET )\n except Exception, e:\n traceback.print_exc()\n logger.error(\"syndicatelib config is missing SYNDICATE_RG_DEFAULT_PORT, SYNDICATE_OBSERVER_SECRET\")\n raise e\n \n # ensure the user exists and has credentials\n try:\n rc, user = observer_core.ensure_principal_exists( principal_id, observer_secret, is_admin=False, max_UGs=1100, max_RGs=1 )\n assert rc is True, \"Failed to ensure principal %s exists (rc = %s,%s)\" % (principal_id, rc, user)\n except Exception, e:\n traceback.print_exc()\n logger.error(\"Failed to ensure user '%s' exists\" % principal_id )\n raise e\n\n # grant the slice-owning user the ability to provision UGs in this Volume\n try:\n rc = observer_core.ensure_volume_access_right_exists( principal_id, volume_name, syndicate_caps )\n assert rc is True, \"Failed to set up Volume access right for slice %s in %s\" % (principal_id, volume_name)\n \n except Exception, e:\n traceback.print_exc()\n logger.error(\"Failed to set up Volume access right for slice %s in %s\" % (principal_id, volume_name))\n raise e\n \n except Exception, e:\n traceback.print_exc()\n logger.error(\"Faoed to ensure user %s can access Volume %s with rights %s\" % (principal_id, volume_name, syndicate_caps))\n raise e\n\n return True", "def test_log_extra_custom_logger(caplog, test_df):\n caplog.clear()\n\n logger_name = \"my_custom_logger\"\n\n my_logger = logging.getLogger(logger_name)\n\n @log_step_extra(len, print_fn=my_logger.info)\n def do_nothing(df, *args, **kwargs):\n return df\n\n with caplog.at_level(logging.INFO):\n test_df.pipe(do_nothing)\n\n assert logger_name in caplog.text", "def log_all(self):\n self.save_raw()\n self.log()", "def __init__(__self__, *,\n copy_log_details_type: str,\n disk_serial_number: str,\n error_log_link: str,\n verbose_log_link: str):\n pulumi.set(__self__, \"copy_log_details_type\", 'DataBoxDisk')\n pulumi.set(__self__, \"disk_serial_number\", disk_serial_number)\n pulumi.set(__self__, \"error_log_link\", error_log_link)\n pulumi.set(__self__, \"verbose_log_link\", verbose_log_link)", "def upgrade_to_15():\n query = {}\n query['$or'] = [\n {'timestamp':''},\n {'$and': [\n {'timestamp': {'$exists': True}},\n {'timestamp': {'$not': {'$type':2}}},\n {'timestamp': {'$not': {'$type':9}}}\n ]}\n ]\n unset = {'$unset': {'timestamp': ''}}\n\n config.db.sessions.update_many(query, unset)\n 
config.db.acquisitions.update_many(query, unset)\n\n query = {'$and': [\n {'timestamp': {'$exists': True}},\n {'timestamp': {'$type':2}}\n ]}\n sessions = config.db.sessions.find(query)\n for s in sessions:\n try:\n fixed_timestamp = dateutil.parser.parse(s['timestamp'])\n except:\n config.db.sessions.update_one({'_id': s['_id']}, {'$unset': {'timestamp': ''}})\n continue\n config.db.sessions.update_one({'_id': s['_id']}, {'$set': {'timestamp': fixed_timestamp}})\n\n acquisitions = config.db.acquisitions.find(query)\n for a in acquisitions:\n try:\n fixed_timestamp = dateutil.parser.parse(a['timestamp'])\n except:\n config.db.sessions.update_one({'_id': a['_id']}, {'$unset': {'timestamp': ''}})\n continue\n config.db.sessions.update_one({'_id': a['_id']}, {'$set': {'timestamp': fixed_timestamp}})", "def clb_access_logging_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:\n elb = session.client(\"elb\")\n # ISO Time\n iso8601Time = (datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat())\n for lb in describe_clbs(cache, session):\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(lb,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n clbName = lb[\"LoadBalancerName\"]\n clbArn = f\"arn:{awsPartition}:elasticloadbalancing:{awsRegion}:{awsAccountId}:loadbalancer/{clbName}\"\n dnsName = lb[\"DNSName\"]\n lbSgs = lb[\"SecurityGroups\"]\n lbSubnets = lb[\"Subnets\"]\n lbAzs = lb[\"AvailabilityZones\"]\n lbVpc = lb[\"VPCId\"]\n clbScheme = lb[\"Scheme\"]\n # Get Attrs\n if elb.describe_load_balancer_attributes(LoadBalancerName=clbName)[\"LoadBalancerAttributes\"][\"AccessLog\"][\"Enabled\"] is False:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": clbArn + \"/classic-loadbalancer-access-logging-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": clbArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks/AWS Security Best Practices\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"MEDIUM\"},\n \"Confidence\": 99,\n \"Title\": \"[ELB.5] Classic load balancers should enable access logging\",\n \"Description\": \"Classic load balancer \"\n + clbName\n + \" does not have access logging enabled. 
Refer to the remediation instructions to remediate this behavior\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on access logging refer to the Access Logs for Your Classic Load Balancer section of the Classic Load Balancers User Guide.\",\n \"Url\": \"https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/access-log-collection.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Networking\",\n \"AssetService\": \"AWS Elastic Load Balancer\",\n \"AssetComponent\": \"Classic Load Balancer\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsElbLoadBalancer\",\n \"Id\": clbArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsElbLoadBalancer\": {\n \"DnsName\": dnsName,\n \"Scheme\": clbScheme,\n \"SecurityGroups\": lbSgs,\n \"Subnets\": lbSubnets,\n \"VpcId\": lbVpc,\n \"AvailabilityZones\": lbAzs,\n \"LoadBalancerName\": clbName\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 ID.AM-3\",\n \"NIST CSF V1.1 DE.AE-1\",\n \"NIST CSF V1.1 DE.AE-3\",\n \"NIST CSF V1.1 DE.CM-1\",\n \"NIST CSF V1.1 DE.CM-7\",\n \"NIST CSF V1.1 PR.PT-1\",\n \"NIST SP 800-53 Rev. 4 AC-2\",\n \"NIST SP 800-53 Rev. 4 AC-4\",\n \"NIST SP 800-53 Rev. 4 AU-6\",\n \"NIST SP 800-53 Rev. 4 AU-12\",\n \"NIST SP 800-53 Rev. 4 CA-3\",\n \"NIST SP 800-53 Rev. 4 CA-7\",\n \"NIST SP 800-53 Rev. 4 CA-9\",\n \"NIST SP 800-53 Rev. 4 CM-2\",\n \"NIST SP 800-53 Rev. 4 CM-3\",\n \"NIST SP 800-53 Rev. 4 CM-8\",\n \"NIST SP 800-53 Rev. 4 IR-4\",\n \"NIST SP 800-53 Rev. 4 IR-5\",\n \"NIST SP 800-53 Rev. 4 IR-8\",\n \"NIST SP 800-53 Rev. 4 PE-3\",\n \"NIST SP 800-53 Rev. 4 PE-6\",\n \"NIST SP 800-53 Rev. 4 PE-20\",\n \"NIST SP 800-53 Rev. 4 PL-8\",\n \"NIST SP 800-53 Rev. 4 SC-5\",\n \"NIST SP 800-53 Rev. 4 SC-7\",\n \"NIST SP 800-53 Rev. 
4 SI-4\",\n \"AICPA TSC CC3.2\",\n \"AICPA TSC CC6.1\",\n \"AICPA TSC CC7.2\",\n \"ISO 27001:2013 A.12.1.1\",\n \"ISO 27001:2013 A.12.1.2\",\n \"ISO 27001:2013 A.12.4.1\",\n \"ISO 27001:2013 A.12.4.2\",\n \"ISO 27001:2013 A.12.4.3\",\n \"ISO 27001:2013 A.12.4.4\",\n \"ISO 27001:2013 A.12.7.1\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.2.1\",\n \"ISO 27001:2013 A.13.2.2\",\n \"ISO 27001:2013 A.14.2.7\",\n \"ISO 27001:2013 A.15.2.1\",\n \"ISO 27001:2013 A.16.1.7\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": clbArn + \"/classic-loadbalancer-access-logging-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": clbArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks/AWS Security Best Practices\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[ELB.5] Classic load balancers should enable access logging\",\n \"Description\": \"Classic load balancer \"\n + clbName\n + \" does not have access logging enabled.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on access logging refer to the Access Logs for Your Classic Load Balancer section of the Classic Load Balancers User Guide.\",\n \"Url\": \"https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/access-log-collection.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Networking\",\n \"AssetService\": \"AWS Elastic Load Balancer\",\n \"AssetComponent\": \"Classic Load Balancer\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsElbLoadBalancer\",\n \"Id\": clbArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsElbLoadBalancer\": {\n \"DnsName\": dnsName,\n \"Scheme\": clbScheme,\n \"SecurityGroups\": lbSgs,\n \"Subnets\": lbSubnets,\n \"VpcId\": lbVpc,\n \"AvailabilityZones\": lbAzs,\n \"LoadBalancerName\": clbName\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 ID.AM-3\",\n \"NIST CSF V1.1 DE.AE-1\",\n \"NIST CSF V1.1 DE.AE-3\",\n \"NIST CSF V1.1 DE.CM-1\",\n \"NIST CSF V1.1 DE.CM-7\",\n \"NIST CSF V1.1 PR.PT-1\",\n \"NIST SP 800-53 Rev. 4 AC-2\",\n \"NIST SP 800-53 Rev. 4 AC-4\",\n \"NIST SP 800-53 Rev. 4 AU-6\",\n \"NIST SP 800-53 Rev. 4 AU-12\",\n \"NIST SP 800-53 Rev. 4 CA-3\",\n \"NIST SP 800-53 Rev. 4 CA-7\",\n \"NIST SP 800-53 Rev. 4 CA-9\",\n \"NIST SP 800-53 Rev. 4 CM-2\",\n \"NIST SP 800-53 Rev. 4 CM-3\",\n \"NIST SP 800-53 Rev. 4 CM-8\",\n \"NIST SP 800-53 Rev. 4 IR-4\",\n \"NIST SP 800-53 Rev. 4 IR-5\",\n \"NIST SP 800-53 Rev. 4 IR-8\",\n \"NIST SP 800-53 Rev. 4 PE-3\",\n \"NIST SP 800-53 Rev. 4 PE-6\",\n \"NIST SP 800-53 Rev. 4 PE-20\",\n \"NIST SP 800-53 Rev. 4 PL-8\",\n \"NIST SP 800-53 Rev. 4 SC-5\",\n \"NIST SP 800-53 Rev. 4 SC-7\",\n \"NIST SP 800-53 Rev. 
4 SI-4\",\n \"AICPA TSC CC3.2\",\n \"AICPA TSC CC6.1\",\n \"AICPA TSC CC7.2\",\n \"ISO 27001:2013 A.12.1.1\",\n \"ISO 27001:2013 A.12.1.2\",\n \"ISO 27001:2013 A.12.4.1\",\n \"ISO 27001:2013 A.12.4.2\",\n \"ISO 27001:2013 A.12.4.3\",\n \"ISO 27001:2013 A.12.4.4\",\n \"ISO 27001:2013 A.12.7.1\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.2.1\",\n \"ISO 27001:2013 A.13.2.2\",\n \"ISO 27001:2013 A.14.2.7\",\n \"ISO 27001:2013 A.15.2.1\",\n \"ISO 27001:2013 A.16.1.7\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding", "def log_access():\n # todo use project prefix\n tail('/var/log/nginx/access.log')", "def test_dump_collection(self):\n\n test_oplog, primary_conn, search_ts = self.get_oplog_thread()\n solr = DocManager()\n test_oplog.doc_manager = solr\n\n #with documents\n primary_conn['test']['test'].insert({'name': 'paulie'})\n search_ts = test_oplog.get_last_oplog_timestamp()\n test_oplog.dump_collection()\n\n test_oplog.doc_manager.commit()\n solr_results = solr._search()\n self.assertEqual(len(solr_results), 1)\n solr_doc = solr_results[0]\n self.assertEqual(long_to_bson_ts(solr_doc['_ts']), search_ts)\n self.assertEqual(solr_doc['name'], 'paulie')\n self.assertEqual(solr_doc['ns'], 'test.test')", "def save_to_mongo(self, collection='pending_trailers'):\n Database.insert(collection=collection, data=self.json())", "def test_post_add_log_event(self):\n pass", "def enable_audit_logging(f):\n\n @wraps(f)\n def wrapper(*args, **kwargs):\n def create_audit_log_for_request_decorator(response):\n return create_audit_log_for_request(response)\n\n if is_audit_enabled():\n # we can't add the `after_this_request` and\n # `create_audit_log_for_request_decorator` decorators to the\n # functions directly, because `is_audit_enabled` depends on\n # the config being loaded\n flask.after_this_request(create_audit_log_for_request_decorator)\n return f(*args, **kwargs)\n\n return wrapper", "def test_enable_local_caching(sdc_builder, sdc_executor, stage_attributes, cluster):\n\n dir_path = os.path.join(tempfile.gettempdir(), get_random_string(string.ascii_lowercase))\n table_name = get_random_string(string.ascii_lowercase, 10)\n\n builder = sdc_builder.get_pipeline_builder()\n directory = builder.add_stage('Directory', type='origin')\n directory.data_format = 'JSON'\n directory.json_content = 'MULTIPLE_OBJECTS'\n directory.files_directory = dir_path\n directory.file_name_pattern = '*.json'\n directory.file_name_pattern_mode = 'GLOB'\n\n kudu = builder.add_stage('Kudu Lookup')\n kudu.kudu_table_name = f'impala::default.{table_name}'\n kudu.key_columns_mapping = [dict(field='/f1', columnName='id')]\n kudu.column_to_output_field_mapping = [dict(field='/d1', columnName='name', defaultValue='no_name')]\n kudu.missing_lookup_behavior = 'PASS_RECORD_ON'\n kudu.enable_table_caching = True\n kudu.eviction_policy_type = 'EXPIRE_AFTER_WRITE'\n kudu.expiration_time = 1\n kudu.time_unit = 'HOURS'\n kudu.set_attributes(**stage_attributes)\n\n wiretap = builder.add_wiretap()\n\n directory >> kudu >> wiretap.destination\n\n pipeline = builder.build().configure_for_environment(cluster)\n\n sdc_executor.add_pipeline(pipeline)\n\n metadata = sqlalchemy.MetaData()\n table = sqlalchemy.Table(table_name,\n metadata,\n sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),\n sqlalchemy.Column('name', sqlalchemy.String),\n impala_partition_by='HASH PARTITIONS 16',\n impala_stored_as='KUDU',\n impala_table_properties={\n 'kudu.master_addresses': 
f'{cluster.server_host}:{DEFAULT_KUDU_PORT}',\n 'kudu.num_tablet_replicas': '1'\n })\n\n engine = cluster.kudu.engine\n table.create(engine)\n\n try:\n sdc_executor.execute_shell(f'mkdir -p {dir_path}')\n sdc_executor.write_file(os.path.join(dir_path, 'a.json'), json.dumps({\"f1\": 1, \"d1\": \"old_name1\"}))\n\n conn = engine.connect()\n conn.execute(table.insert(), [{'id': 1, 'name': 'name1'}])\n\n status = sdc_executor.start_pipeline(pipeline)\n status.wait_for_pipeline_batch_count(2)\n\n conn.execute(table.update().where(table.c.id == 1).values(name='name2'))\n\n sdc_executor.write_file(os.path.join(dir_path, 'b.json'), json.dumps({\"f1\": 1, \"d1\": \"old_name2\"}))\n\n status.wait_for_pipeline_batch_count(4)\n\n output_records = [record.field for record in wiretap.output_records]\n\n if stage_attributes['enable_local_caching']:\n assert [{'f1': 1, 'd1': 'name1'}, {'f1': 1, 'd1': 'name1'}] == output_records\n else:\n assert [{'f1': 1, 'd1': 'name1'}, {'f1': 1, 'd1': 'name2'}] == output_records\n\n finally:\n try:\n sdc_executor.stop_pipeline(pipeline)\n finally:\n table.drop(engine)\n sdc_executor.execute_shell(f'rm -fr {dir_path}')", "def InsertLog():", "def load_landkreis_information():\n client = MongoClient(f'mongodb://{os.getenv(\"USR_\")}:{os.getenv(\"PWD_\")}@{os.getenv(\"REMOTE_HOST\")}:{os.getenv(\"REMOTE_PORT\")}/{os.getenv(\"AUTH_DB\")}')\n db = client[os.getenv(\"MAIN_DB\")]\n lk_collection = db[\"lk_overview\"]\n data = pd.DataFrame(list(lk_collection.find()))\n return data", "def test_last_used(self, monkeypatch):\n monkeypatch.setenv('ENABLE_AUTO_EXPIRE', 'true')\n monkeypatch.setenv('INACTIVITY_AGE', '10')\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('user3', 'kljin', 'Active', created, last_used)\n key.audit(10, 11, 2, 1)\n assert key.audit_state == 'expire'\n key.audit(60, 80, 2, 1)\n assert key.audit_state == 'stagnant_expire'", "def inspect_storage_objects_for_debugging(k8s_ctx: str, dry_run: bool = False):\n cmd = f'kubectl --context={k8s_ctx} get pv,pvc -o=NAME'\n for storage_obj in run_commands([cmd], dry_run):\n cmd = f'kubectl --context={k8s_ctx} describe {storage_obj}'\n if dry_run:\n logging.debug(cmd)\n else:\n p = safe_exec(cmd)\n if p.stdout:\n for line in p.stdout.decode().split('\\n'):\n if line.startswith(\"Status\") or line.startswith(\"Finalizers\"):\n logging.debug(f'{storage_obj} {line}')", "def GetLogs(self):\n raise NotImplementedError()", "def log_model_metadata(model_uid, schema, db_conn):\n df = pd.DataFrame({\n 'training_timestamp': [get_current_timestamp()],\n 'model_uid': [model_uid]\n })\n df.to_sql(name='model_metadata', schema=schema, con=db_conn, if_exists='append', index=False)", "def logtool(self, action, **options):\n pass", "def update_logs(event, log, action_log, error_log):\n\tif event[\"type\"] == \"error\":\n\t\t#Update the error log file\n\telse:\n\t\t# event[\"type\"] == \"action\"\n\t\t#Update action file", "def ocsaudit_rest_log_command(method, url, url_args, username):\n \n try:\n if method == \"GET\":\n type = cmd_type.type_get\n elif method == \"POST\":\n type = cmd_type.type_post\n elif method == \"PATCH\":\n type = cmd_type.type_patch\n elif method == \"DELETE\":\n type = cmd_type.type_delete\n else:\n type = cmd_type.type_unknown\n print \"Unidentified command type {0}\".format(method)\n \n url = url.split(\"/v1/\",1)[1]\n args = \" \".join(url_args)\n \n ocsaudit_log_command(username, type, 
cmd_interface.interface_rest, \n url, args)\n except Exception as e:\n print \"ocsaudit_rest_log_command Exception {0}\".format(e)", "def audit(cls):\n old_save = cls.save\n old_delete = cls.delete\n def save(self, *arg, **kw):\n from middleware import get_current_user\n user = get_current_user()\n if user is not None:\n self.last_user_id = user.id\n return old_save(self, *arg, **kw)\n\n\n def delete(self, *arg, **kw):\n from middleware import get_current_user\n user = get_current_user()\n if user is not None:\n self.last_user_id = user.id\n cls.save(self)\n return old_delete(self, *arg, **kw)\n cls.save = save\n cls.delete = delete\n cls.last_user_id = models.IntegerField(null=True, blank=True, editable=False)\n return cls", "def describe_mongo_dblog_config(\n self,\n request: dds_20151201_models.DescribeMongoDBLogConfigRequest,\n ) -> dds_20151201_models.DescribeMongoDBLogConfigResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_mongo_dblog_config_with_options(request, runtime)", "def logentry(jobid, label, typ, content=None, path=None):\n ud = str(uuid.uuid4())\n db.logs.save({\"uuid\":ud, \"jobid\":jobid, \"label\":label, \"type\":typ, \"content\":content, \"date\":tstamp(), \"containerurl\":path})", "def __enable_log_streaming():\n # Enable BigQuery service for the project:\n run_command('gcloud services enable bigquery --project {}'.format(PROJECT_ID))\n\n # Create the BigQuery dataset to store the logs:\n # For details refer to https://cloud.google.com/bigquery/docs/datasets#bigquery-create-dataset-cli\n run_command('bq mk --data_location {} --description \\\"Cloud logging export.\\\" {}'\n .format(LOGS_LOCATION, LOGS_SINK_DATASET_ID), 'already exists')\n\n # Set up a log sink to the above-created BigQuery dataset:\n output_message = run_command('gcloud logging sinks create {} {} --project {} \\\n --log-filter=\\'resource.type=\\\"gcs_bucket\\\" OR resource.type=\\\"project\\\"\\''\n .format(LOGS_SINK_NAME, LOGS_SINK_DESTINATION, PROJECT_ID))\n\n # The service account that will be writing to BQ dataset is listed by the previous command.\n # After extracting the service account from the message, you need to give it BQ Writer role to that service account.\n __set_dataset_access( __find_service_account_in_message(output_message) )" ]
[ "0.589836", "0.57862264", "0.5479396", "0.53276616", "0.5070259", "0.50430673", "0.5013744", "0.49703205", "0.49509078", "0.4943958", "0.49248308", "0.49248308", "0.49054325", "0.4890508", "0.48703638", "0.48651615", "0.48189864", "0.48181394", "0.48126942", "0.48086238", "0.47714564", "0.47645283", "0.47190982", "0.469417", "0.4682236", "0.4658147", "0.46523142", "0.46427134", "0.4627733", "0.4600334", "0.4583746", "0.45829535", "0.45610845", "0.45487535", "0.45369792", "0.45296195", "0.45200622", "0.45158532", "0.45135257", "0.45087272", "0.44933844", "0.44896442", "0.44780484", "0.44596392", "0.44573265", "0.44514826", "0.44424143", "0.44398478", "0.44218993", "0.4421767", "0.4411959", "0.4401724", "0.43963262", "0.43848306", "0.43804586", "0.43776047", "0.4370529", "0.43691784", "0.436865", "0.4366847", "0.4360559", "0.4349807", "0.434768", "0.4340404", "0.43367457", "0.4336186", "0.4335609", "0.4328269", "0.43278924", "0.43219286", "0.43152776", "0.43145692", "0.43139264", "0.4311371", "0.43076736", "0.4305778", "0.43036142", "0.4301282", "0.4298705", "0.4298513", "0.4293655", "0.42844364", "0.42766058", "0.42735416", "0.42704204", "0.42666212", "0.42655167", "0.4263765", "0.4263708", "0.42612353", "0.42610663", "0.42585126", "0.4258335", "0.42568165", "0.42554635", "0.42527717", "0.42525977", "0.42492634", "0.42488885", "0.42472807", "0.42348155" ]
0.0
-1
This operation is applicable only to general-purpose local-disk and dedicated local-disk instances. This operation depends on the audit log feature of ApsaraDB for MongoDB. You can enable the audit log feature based on your business needs. For more information, see [Enable the audit log feature](~~59903~~). Starting from January 6, 2022, the official edition of the audit log feature has been launched in all regions, and new applications for the free trial edition are no longer accepted. For more information, see [Notice on official launch of the pay-as-you-go audit log feature and no more application for the free trial edition](~~377480~~).
async def describe_mongo_dblog_config_with_options_async(
    self,
    request: dds_20151201_models.DescribeMongoDBLogConfigRequest,
    runtime: util_models.RuntimeOptions,
) -> dds_20151201_models.DescribeMongoDBLogConfigResponse:
    UtilClient.validate_model(request)
    query = {}
    if not UtilClient.is_unset(request.dbinstance_id):
        query['DBInstanceId'] = request.dbinstance_id
    if not UtilClient.is_unset(request.owner_account):
        query['OwnerAccount'] = request.owner_account
    if not UtilClient.is_unset(request.owner_id):
        query['OwnerId'] = request.owner_id
    if not UtilClient.is_unset(request.resource_owner_account):
        query['ResourceOwnerAccount'] = request.resource_owner_account
    if not UtilClient.is_unset(request.resource_owner_id):
        query['ResourceOwnerId'] = request.resource_owner_id
    if not UtilClient.is_unset(request.security_token):
        query['SecurityToken'] = request.security_token
    req = open_api_models.OpenApiRequest(
        query=OpenApiUtilClient.query(query)
    )
    params = open_api_models.Params(
        action='DescribeMongoDBLogConfig',
        version='2015-12-01',
        protocol='HTTPS',
        pathname='/',
        method='POST',
        auth_type='AK',
        style='RPC',
        req_body_type='formData',
        body_type='json'
    )
    return TeaCore.from_map(
        dds_20151201_models.DescribeMongoDBLogConfigResponse(),
        await self.call_api_async(params, req, runtime)
    )
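A minimal usage sketch for the async method above, assuming the standard Alibaba Cloud Python SDK packaging (alibabacloud_dds20151201, alibabacloud_tea_openapi, alibabacloud_tea_util); the client class, endpoint, credentials, and instance ID shown below are placeholders and assumptions rather than values confirmed by this document.

# Usage sketch only: package names, client class, endpoint, credentials, and the
# instance ID are assumptions based on the usual Alibaba Cloud Python SDK layout.
import asyncio

from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_dds20151201.client import Client
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util import models as util_models


async def main() -> None:
    # Build a client; replace the placeholder credentials and endpoint.
    config = open_api_models.Config(
        access_key_id='<your-access-key-id>',
        access_key_secret='<your-access-key-secret>',
        endpoint='mongodb.aliyuncs.com',
    )
    client = Client(config)
    # Query the audit log configuration of one instance (placeholder instance ID).
    request = dds_20151201_models.DescribeMongoDBLogConfigRequest(
        dbinstance_id='dds-bp1xxxxxxxxxxxxx',
    )
    runtime = util_models.RuntimeOptions()
    response = await client.describe_mongo_dblog_config_with_options_async(request, runtime)
    print(response.body)


if __name__ == '__main__':
    asyncio.run(main())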
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def enable_audit_monitoring():\n __enable_data_access_logging()\n __enable_log_streaming()\n __create_audit_alerts()\n __get_incidents_history()", "def __enable_data_access_logging():\n _tempFile = \"tmp_audit_config.json\"\n\n auditConfig = {\n \"auditConfigs\": [\n {\n \"auditLogConfigs\": [\n {\n \"logType\": \"ADMIN_READ\"\n },\n {\n \"logType\": \"DATA_WRITE\"\n },\n {\n \"logType\": \"DATA_READ\"\n }\n ],\n \"service\": \"allServices\",\n }\n ]\n }\n\n # get current policy\n run_command('gcloud projects get-iam-policy {} --format=json >>{}'.format(PROJECT_ID, _tempFile))\n\n # merge it with above-defined config\n merge_JSON(auditConfig, _tempFile)\n\n # set the policy\n run_command('gcloud projects set-iam-policy {} {}'.format(PROJECT_ID, _tempFile))\n\n # delete the temp file\n run_command('rm {}'.format(_tempFile))", "def add_audit(self, entity_name, object_name, operation,\n data, auth_ctx, session):", "def test_mongodb_oplog_origin(sdc_builder, sdc_executor, mongodb):\n pipeline_builder = sdc_builder.get_pipeline_builder()\n pipeline_builder.add_error_stage('Discard')\n\n time_now = int(time.time())\n mongodb_oplog = pipeline_builder.add_stage('MongoDB Oplog')\n database_name = get_random_string(ascii_letters, 10)\n # Specify that MongoDB Oplog needs to read changes occuring after time_now.\n mongodb_oplog.set_attributes(collection='oplog.rs', initial_timestamp_in_secs=time_now, initial_ordinal=1)\n\n trash = pipeline_builder.add_stage('Trash')\n mongodb_oplog >> trash\n pipeline = pipeline_builder.build().configure_for_environment(mongodb)\n\n try:\n # Insert documents in MongoDB using PyMongo.\n # First a database is created. Then a collection is created inside that database.\n # Then documents are inserted in that collection.\n mongodb_database = mongodb.engine[database_name]\n mongodb_collection = mongodb_database[get_random_string(ascii_letters, 10)]\n input_rec_count = 6\n inserted_list = mongodb_collection.insert_many([{'x': i} for i in range(input_rec_count)])\n assert len(inserted_list.inserted_ids) == input_rec_count\n\n sdc_executor.add_pipeline(pipeline)\n snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot\n sdc_executor.stop_pipeline(pipeline)\n\n assert len(snapshot[mongodb_oplog].output) == input_rec_count\n for record in list(enumerate(snapshot[mongodb_oplog].output)):\n assert record[1].value['value']['o']['value']['x']['value'] == str(record[0])\n # Verify the operation type is 'i' which is for 'insert' since we inserted the records earlier.\n assert record[1].value['value']['op']['value'] == 'i'\n assert record[1].value['value']['ts']['value']['timestamp']['value'] > time_now\n\n finally:\n logger.info('Dropping %s database...', database_name)\n mongodb.engine.drop_database(database_name)", "def test_otoroshi_controllers_adminapi_events_controller_audit_events(self):\n pass", "def log(cmdDict, kind, error=None):\n\n cmdDict['timestamp'] = str(int(time.time() * 1000))\n cmdDict['logType'] = kind\n cmdDict['_id'] = cmdDict['transactionNumber'] + cmdDict['user'] + cmdDict['command'] + cmdDict['timestamp'] + cmdDict['logType']\n\n if (error != None):\n cmdDict['errorMessage'] = error\n\n try:\n Database.insert(TRANSACT_COLLECT, cmdDict)\n\n except pymongo.errors.PyMongoError as err:\n print(f\"ERROR! Could not log command into database. 
Failed with: {err}\")", "def get_audit(self, query, session):\n raise NotImplementedError()", "def test_old_access(self):\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('username', 'keyid', 'Active', created, last_used)\n key.audit(60, 80, 10, 9)\n assert key.audit_state == 'stagnant_expire'", "def MongoLog(self, request_number, process, log_message):\n try:\n print(\"Attempting to connect to MongoDB...\")\n client = MongoClient('localhost', 27017)\n db = client.database\n collection = db.logging_database\n\n status_log = {\"Request_No\": request_number, \"Brewing_Process\": process, \"Log_Message\": log_message,\n \"Time\": datetime.datetime.now()}\n\n try:\n collection.insert_one(status_log)\n except TypeError: # Error Handling for MongoDB versions that do not implement insert_one() method\n collection.insert(status_log)\n\n print(status_log)\n except Exception as e:\n print(\"MongoDB connection Error:\" + str(e))", "def disable_disk_logging():\r\n app.set_option(_DISK_LOG_LEVEL_OPTION, LogOptions._LOG_LEVEL_NONE_KEY, force=True)", "def view_audit_log(_) -> int:\n return 1 << 7", "def view_audit_log(_) -> int:\n return 1 << 7", "def test_normal(self):\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n k = Key('username', 'keyid', 'Active', created, last_used)\n k.audit(60, 80, 20, 19)\n assert k.creation_age == 15\n assert k.audit_state == 'good'", "def check_oplog(con, warning, critical, perf_data):\n warning = warning or 24 \n critical = critical or 4\n try:\n db = con.local\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) \n if (db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) != None) :\n oplog = \"oplog.rs\";\n else :\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"})\n if (db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"}) != None) :\n oplog = \"oplog.$main\";\n else :\n message = \"neither master/slave nor replica set replication detected\";\n return check_levels(None,warning,critical,message)\n\n try:\n set_read_preference(con.admin)\n data=con.local.command(pymongo.son_manipulator.SON([('collstats',oplog)]))\n except:\n data = con.admin.command(son.SON([('collstats',oplog)]))\n\n ol_size=data['size']\n ol_storage_size=data['storageSize']\n ol_used_storage=int(float(ol_size)/ol_storage_size*100+1)\n ol=con.local[oplog]\n firstc = ol.find().sort(\"$natural\",pymongo.ASCENDING).limit(1)[0]['ts']\n lastc = ol.find().sort(\"$natural\",pymongo.DESCENDING).limit(1)[0]['ts']\n time_in_oplog= (lastc.as_datetime()-firstc.as_datetime())\n message=\"Oplog saves \"+ str(time_in_oplog) + \" %d%% used\" %ol_used_storage \n try: #work starting from python2.7\n hours_in_oplog= time_in_oplog.total_seconds()/60/60\n except:\n hours_in_oplog= float(time_in_oplog.seconds + time_in_oplog.days * 24 * 3600)/60/60\n approx_level=hours_in_oplog*100/ol_used_storage\n message+=performance_data(perf_data,[(\"%.2f\" % hours_in_oplog,'oplog_time',warning,critical),(\"%.2f \" % approx_level, 'oplog_time_100_percent_used')])\n return check_levels(-approx_level,-warning,-critical,message)\n\n except Exception, e:\n return exit_with_general_critical(e)", "def log(msg):\n\n print('datastore: %s' % msg)", "def AddSqlServerAudit(parser):\n parser.add_argument(\n '--audit-bucket-path',\n required=False,\n help=(\n 'The location, as a Cloud Storage 
bucket, to which audit files are '\n 'uploaded. The URI is in the form gs://bucketName/folderName. Only '\n 'available for SQL Server instances.'\n ),\n )\n\n parser.add_argument(\n '--audit-retention-interval',\n default=None,\n type=arg_parsers.Duration(upper_bound='7d'),\n required=False,\n help=(\n 'The number of days for audit log retention on disk, for example, 3d'\n 'for 3 days. Only available for SQL Server instances.'\n ),\n )\n\n parser.add_argument(\n '--audit-upload-interval',\n default=None,\n type=arg_parsers.Duration(upper_bound='720m'),\n required=False,\n help=(\n 'How often to upload audit logs (audit files), for example, 30m'\n 'for 30 minutes. Only available for SQL Server instances.'\n ),\n )", "def setup_tap_mongodb(self):\n db_script = os.path.join(DIR, '..', '..', 'db', 'tap_mongodb.sh')\n self._run_command(db_script)", "def logs(self, container: Container) -> str:", "def add_log4mongo():\n remove_log4mongo()\n root = logging.getLogger()\n root.addHandler(MongoHandler())", "def _add_connection_info(report_kvs, db):\n report_kvs['Flavor'] = 'mongodb'\n report_kvs['Database'] = db.name\n report_kvs['RemoteHost'] = db.connection.host\n report_kvs['RemotePort'] = db.connection.port", "def log_db():\n return pymongo.MongoClient(SCITRAN_PERSISTENT_DB_LOG_URI).get_database()", "def update_log(ident, document):\n logs_col.update_one({\"_id\": ident}, {\"$set\": document})", "def test_getAuditLogsWithNoParams(self):\r\n logs = self.client.getAuditLogs()\r\n return logs", "def test_host_file_audit(host):\n with host.sudo():\n host.run(\"touch /etc/hosts\")\n audit_log = host.run(\"journalctl -u auditd --since \\\"10 seconds ago\\\" | grep \\\"/etc/hosts\\\"\")\n assert audit_log.stdout", "def audit(msg):\n global auditLogger\n if auditLogger is not None:\n auditLogger.info(msg)", "def get_log(ident):\n return logs_col.find_one({\"_id\": ObjectId(ident)})", "def audit(audit_code, audit_str, request, system_initiated=False):\n try:\n db_path, err = config.get_db_path()\n if err:\n raise Exception(err)\n if system_initiated is False:\n ip, err = networking.get_client_ip(request.META)\n if err:\n raise Exception(err)\n now, err = datetime_utils.get_epoch(when='now', num_previous_days=0)\n if err:\n raise Exception(er)\n if system_initiated:\n username = 'System'\n source_ip = 'System'\n else:\n username = request.user.username\n source_ip, err = networking.get_client_ip(request.META)\n if err:\n raise Exception(err)\n command_list = []\n cmd = [\n 'insert into audit(audit_time, username, source_ip, audit_code, audit_str) values (?,?,?,?,?)', (now, username, source_ip, audit_code, audit_str,)]\n command_list.append(cmd)\n audit_id, err = db.execute_iud(db_path, command_list, get_rowid=True)\n if err:\n raise Exception(err)\n ret, err = event_notifications.record_event_notification_holding(\n event_id=audit_id, event_type_id=2)\n if err:\n raise Exception(err)\n\n except Exception, e:\n return False, 'Error performing an audit operation : %s' % str(e)\n else:\n return True, None", "def event_log(self):\n pass", "def audit_log(self, account_id):\n from pureport_client.commands.accounts.audit_log import Command\n return Command(self.client, account_id)", "def disk_log_level():\r\n if LogOptions._DISK_LOG_LEVEL is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_LEVEL", "def test_mongo_logging_client_persists_log():\n\n error_message = \"This is a test message.\"\n logger = 
LoggingService(console_output=True)\n\n result = logger.log(LogEntry(LogLevel.ERROR, __name__, error_message))\n logger.log(LogEntry(LogLevel.WARN, __name__, error_message))\n logger.log(LogEntry(LogLevel.INFO, __name__, error_message))\n logger.log(LogEntry(LogLevel.DEBUG, __name__, error_message))\n\n assert result.message == error_message", "def upload_logs():\n return {\n 'page': 'upload_logs',\n 'raw_logs': '',\n }", "def to(cls, database=\"AUDIT\", collection=\"log\",\n mongodb_uri=\"mongodb://localhost:27017\", level=logging.NOTSET):\n return cls(mongodb_uri, database, collection, level)", "def dump_to_log(self):\n # self._send_request(\"/dumpToLog\")\n pass", "def audit(self):\n self.ping()", "def db_for_write(self, model, **hints):\n if model == FilterRecordingTracking:\n return 'db_rest_api'\n return None", "async def addAudit(self, name, description, status, type, data, userid) -> CreateAuditResponse:\n return await self.stub.CreateAudit(\n CreateAuditRequest(name=name,\n description=description, type=type, status=status, data=data, created_by=userid\n ))", "def electrolytedb():\n if not check_for_mongodb():\n pytest.skip(\"MongoDB is required\")", "async def audit_actions(self, ctx: Context) -> None:\n\n if ctx.invoked_subcommand is None:\n await ctx.send_help('auditaction')", "def setup_logging():\n log.setup('keystone')", "def dwl_auditlog_entry_report(session):\n url = session.get_url('audit', 'dwl')\n\n req = re.Request('GET', url)\n\n return session.send_recv(req, 'Audit log entry report downloaded.')", "def logQuote(cmdDict):\n\n try:\n Database.insert(TRANSACT_COLLECT, cmdDict)\n\n except pymongo.errors.PyMongoError as err:\n print(f\"ERROR! Could not log command into database. Failed with: {err}\")", "def cf_log_analytics_data_plane(cli_ctx, _):\n from azure.loganalytics import LogAnalyticsDataClient\n from azure.cli.core._profile import Profile\n profile = Profile(cli_ctx=cli_ctx)\n cred, _, _ = profile.get_login_credentials(\n resource=cli_ctx.cloud.endpoints.log_analytics_resource_id)\n api_version = 'v1'\n return LogAnalyticsDataClient(cred,\n base_url=cli_ctx.cloud.endpoints.log_analytics_resource_id + '/' + api_version)", "def test_disabled(self, monkeypatch):\n monkeypatch.setenv('ENABLE_AUTO_EXPIRE', 'true')\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('user2', 'ldasfkk', 'Inactive', created, last_used)\n key.audit(10, 11, 10, 8)\n assert key.audit_state == 'disabled'\n key.audit(60, 80, 30, 20)\n assert key.audit_state == 'disabled'", "def test_aud_from_log_ignores_index():\n assert True", "def main():\n\n args = cli.get_args()\n\n audit = {}\n try:\n service_instance,content = connect_vc(args.host,args.user,args.password,args.port)\n\n if sys.stdout.isatty():\n print(\"vCenter: %s\" % args.host)\n \n content = service_instance.RetrieveContent()\n\n container = content.rootFolder # starting point to look into\n datacenters = get_datacenters(content)\n for dc in datacenters:\n datacenters[dc]['clusters'] = get_clusters(datacenters[dc]['dc'])\n\n datacenters[dc]['vms'] = get_vms(datacenters[dc]['dc'].vmFolder)\n \n get_nets(dc)\n get_dstores(dc)\n\n vmcount=0\n \n for dc in datacenters:\n for vm in sorted(datacenters[dc]['vms'],key=lambda s: s.lower()):\n vmcount+=1\n v = datacenters[dc]['vms'][vm]\n c = find_cluster(datacenters[dc]['clusters'],v.runtime.host.name)\n vort = \"Template\" if v.summary.config.template == True else \"VM\"\n 
audit[v.name]={}\n audit[v.name]['datacenter'] = dc\n audit[v.name]['cluster'] = c\n audit[v.name]['type'] = vort\n audit[v.name]['hostname'] = v.summary.guest.hostName\n audit[v.name]['guestid'] = v.config.guestId\n audit[v.name]['fullname'] = v.summary.config.guestFullName\n audit[v.name]['state'] = v.runtime.powerState\n audit[v.name]['ip'] = v.guest.ipAddress\n if sys.stdout.isatty():\n print(vmcount,\"Guests processed\",end='\\r')\n sys.stdout.flush()\n# print(\"%-15s:%-10s %-8s %-30s %-30s %s %s %s %s\" % (dc, c, vort,v.name,v.summary.guest.hostName, v.config.guestId, v.summary.config.guestFullName,v.guest.guestState,v.guest.ipAddress ))\n #print vort, v.name, v.summary.guest.hostName, v.guest.guestId, v.summary.config.guestFullName,v.guest.guestState,v.guest.ipAddress #,v.summary\n# print(\"\\ncount:\",vmcount)\n \n print(json.dumps(audit, indent=4, separators=(',', ': ')))\n \n except vmodl.MethodFault as error:\n print(\"Caught vmodl fault : \" + error.msg)\n return -1\n\n return 0", "def get_logdb_quota():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<show><system><logdb-quota></logdb-quota></system></show>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def test_mongodb_origin_simple_with_BSONBinary(sdc_builder, sdc_executor, mongodb):\n\n ORIG_BINARY_DOCS = [\n {'data': binary.Binary(b'Binary Data Flute')},\n {'data': binary.Binary(b'Binary Data Oboe')},\n {'data': binary.Binary(b'Binary Data Violin')}\n ]\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n pipeline_builder.add_error_stage('Discard')\n\n mongodb_origin = pipeline_builder.add_stage('MongoDB', type='origin')\n mongodb_origin.set_attributes(capped_collection=False,\n database=get_random_string(ascii_letters, 5),\n collection=get_random_string(ascii_letters, 10))\n\n trash = pipeline_builder.add_stage('Trash')\n mongodb_origin >> trash\n pipeline = pipeline_builder.build().configure_for_environment(mongodb)\n\n try:\n # MongoDB and PyMongo add '_id' to the dictionary entries e.g. docs_in_database\n # when used for inserting in collection. Hence the deep copy.\n docs_in_database = copy.deepcopy(ORIG_BINARY_DOCS)\n\n # Create documents in MongoDB using PyMongo.\n # First a database is created. 
Then a collection is created inside that database.\n # Then documents are created in that collection.\n logger.info('Adding documents into %s collection using PyMongo...', mongodb_origin.collection)\n mongodb_database = mongodb.engine[mongodb_origin.database]\n mongodb_collection = mongodb_database[mongodb_origin.collection]\n insert_list = [mongodb_collection.insert_one(doc) for doc in docs_in_database]\n assert len(insert_list) == len(docs_in_database)\n\n # Start pipeline and verify the documents using snaphot.\n sdc_executor.add_pipeline(pipeline)\n snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot\n sdc_executor.stop_pipeline(pipeline)\n rows_from_snapshot = [{'data': str(record.value2['data'])} for record in snapshot[mongodb_origin].output]\n\n assert rows_from_snapshot == [{'data': str(record.get('data'))} for record in ORIG_BINARY_DOCS]\n\n finally:\n logger.info('Dropping %s database...', mongodb_origin.database)\n mongodb.engine.drop_database(mongodb_origin.database)", "def add_log(self):\n self.stack = []\n diff = self.diff(self.original, self.doc)\n entry = {\"_id\": utils.get_iuid(),\n \"doctype\": constants.DOCTYPE_LOG,\n \"docid\": self.doc[\"_id\"],\n \"diff\": diff,\n \"timestamp\": utils.get_time()}\n self.modify_log_entry(entry)\n if hasattr(flask.g, \"current_user\") and flask.g.current_user:\n entry[\"username\"] = flask.g.current_user[\"username\"]\n else:\n entry[\"username\"] = None\n if flask.has_request_context():\n entry[\"remote_addr\"] = str(flask.request.remote_addr)\n entry[\"user_agent\"] = str(flask.request.user_agent)\n else:\n entry[\"remote_addr\"] = None\n entry[\"user_agent\"] = os.path.basename(sys.argv[0])\n flask.g.db.put(entry)", "def docker_logs():\n local('docker logs {} -f'.format(project_name))", "async def view_audit_actions(self, ctx: Context) -> None:\n\n assert ctx.guild is not None # handle by `cog_check`\n\n if logging_info := (await typed_retrieve_query(\n self.bot.database,\n int,\n 'SELECT BITS FROM LOGGING WHERE GUILD_ID=?',\n (ctx.guild.id,))\n ):\n await ctx.send(embed=build_actions_embed(LoggingActions.all_enabled_actions((logging_info[0]))))\n else:\n await ctx.send('You must first set an audit channel before viewing audit actions.'\n '\\n_See `auditactions setchannel` for more information._')", "def log_image(data_category, image_name, path=None, plot=None, **kwargs):\n # always, just print in logs\n log(logging.INFO, data_category, \"AML MetricImage({})\".format(image_name))\n if data_category == DataCategory.ONLY_PUBLIC_DATA:\n # if public, ask azureml to record (if azureml attached)\n run = AmlRunWrapper()\n run.setup(attach=True)\n run.log_image(image_name, path, plot, **kwargs)\n run.flush()", "def print_aldb(service):\n # For now this sends logs to the log file.\n # Future direction is to create an INSTEON control panel.\n entity_id = service.data[CONF_ENTITY_ID]\n signal = f\"{entity_id}_{SIGNAL_PRINT_ALDB}\"\n dispatcher_send(hass, signal)", "def install_mongo ( vpc_conn, ec2_conn, cloudwatch_conn, vpc, base_name, aws_account_type, base_topicarn, params ):\n app_type = 'MONGO'\n mongo_keypair = get_keypair_name( aws_account_type, vpc.region.name, app_type )\n print mongo_keypair\n mongo_sec_grp_name = get_secgrp_name( base_name, app_type )\n \n print \"mongo_sec_grp_name\" + mongo_sec_grp_name\n \n mongo_sec_grp = find_secgrp(ec2_conn, mongo_sec_grp_name)\n print mongo_sec_grp\n \n if not mongo_sec_grp :\n mongo_sec_grp = create_secgrp( ec2_conn,\n vpc,\n mongo_sec_grp_name,\n 
'Controls access to the ' + app_type )\n print mongo_sec_grp \n \n remove_all_rules( ec2_conn, [ mongo_sec_grp ],deep = True,base_name = base_name)\n grant_ssh_access( ec2_conn, [ mongo_sec_grp ], find_group( ec2_conn, base_name, 'NAT' ) )\n grant_grp_self_access ( ec2_conn, mongo_sec_grp, 0, 65535, protocol = 'tcp' )\n \n mongo_ami_name = base_name + '-' + app_type\n \n mongo_ami = get_ami_by_name( ec2_conn, mongo_ami_name )\n if not mongo_ami :\n mongo_ami = get_ami_by_name( ec2_conn, default_mongo_ami )\n\n print \"Creating mongoDB Instances\"\n \n mongo_subnets = get_vpc_subnets( vpc_conn, vpc, 'STATIC' )\n mongo_sec_grps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ mongo_sec_grp_name ] } )\n \n primary_ip = get_primary_node(mongo_subnets)\n mongo_instances = []\n mongo_config = get_mongo_config(mongo_subnets)\n \n user_data = None\n primary_instance_id = None\n primary_subnet = None\n for subnet in mongo_subnets :\n cidr_block = subnet.cidr_block\n \n if cidr_block == primary_ip :\n primary_subnet = subnet\n \n # First launch the secondary instances\n if cidr_block != primary_ip :\n instance_private_ip = get_static_ip(subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo )\n\n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )\n\n user_data = base64.b64encode(mongo_config)\n \n print \"Launching primary mongo instance\"\n print \"primary_subnet \" +primary_subnet.cidr_block\n instance_private_ip = get_static_ip(primary_subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = primary_subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo)\n \n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )", "def create_sync_entry(ts, coll, idx):\n sync_log = connection.ElasticLogs()\n sync_log.ts = ts\n sync_log.coll = unicode(coll)\n sync_log.idx = unicode(idx)\n sync_log.save()\n return True", "def log_event(event):\n LOGGER.info(\"====================================================\")\n LOGGER.info(event)\n LOGGER.info(\"====================================================\")", "def enable_access_logging(self) -> Optional[bool]:\n return pulumi.get(self, \"enable_access_logging\")", "def modify_audit_log_filter_with_options(\n self,\n request: dds_20151201_models.ModifyAuditLogFilterRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ModifyAuditLogFilterResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.filter):\n query['Filter'] = request.filter\n if not 
UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_type):\n query['RoleType'] = request.role_type\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ModifyAuditLogFilter',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ModifyAuditLogFilterResponse(),\n self.call_api(params, req, runtime)\n )", "def __init__(self) -> None:\n name = \"Ensure that RDS Cluster audit logging is enabled for MySQL engine\"\n id = \"CKV_AWS_325\"\n supported_resources = (\"aws_rds_cluster\",)\n categories = (CheckCategories.LOGGING,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)", "def test_rotate_access(self):\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('username', 'keyid', 'Active', created, last_used)\n key.audit(60, 80, 20, 10)\n assert key.audit_state == 'stagnant'", "def test_read_namespaced_deployment_log_log(self):\n pass", "def test_resourcelog(client, test_database, session):\n test_database.refresh()\n\n usersession_id = 1\n remote_addr = \"127.0.0.1\"\n\n # Without payload\n r = client.get(\"/api/v1/config/\")\n statuscode = r.status_code\n response_size = int(r.headers.get(\"Content-Length\"))\n\n rlogs = session.query(log.ResourceLog).all()\n assert len(rlogs) == 2 # 2 entries since API did a login as first entry\n\n rl = rlogs[-1]\n assert rl.remote_addr == remote_addr\n assert rl.usersession_id == usersession_id\n assert rl.method == \"GET\"\n assert rl.resource == \"/api/v1/config/\"\n assert rl.statuscode == statuscode\n assert rl.response_size == response_size\n assert rl.payload is None\n assert rl.payload_size == 0\n assert rl.query == \"\"\n assert rl.duration > 0\n assert isinstance(rl.time, datetime.datetime)\n\n # With payload\n\n payload_data = {\n \"allele_ids\": [1],\n \"gp_name\": \"HBOCUTV\",\n \"gp_version\": \"v01\",\n \"referenceassessments\": [],\n }\n r = client.post(\"/api/v1/acmg/alleles/?dummy=data\", payload_data)\n payload = json.dumps(payload_data)\n payload_size = len(payload)\n statuscode = r.status_code\n response_size = int(r.headers.get(\"Content-Length\"))\n\n rlogs = session.query(log.ResourceLog).all()\n assert len(rlogs) == 4 # 4 since /currentuser is called to check whether logged in\n\n rl = rlogs[-1]\n assert statuscode == 200\n assert rl.remote_addr == remote_addr\n assert rl.usersession_id == usersession_id\n assert rl.method == \"POST\"\n assert rl.resource == \"/api/v1/acmg/alleles/\"\n assert rl.statuscode == statuscode\n assert rl.response_size == response_size\n assert rl.payload == payload\n assert rl.payload_size == payload_size\n assert rl.query == \"dummy=data\"\n assert rl.duration > 0\n assert isinstance(rl.time, datetime.datetime)\n\n # Make 
sure /login doesn't log passwords\n payload_data = {\"username\": \"abc\", \"password\": \"123\"}\n r = client.post(\"/api/v1/users/actions/login/\", payload_data)\n statuscode = r.status_code\n response_size = int(r.headers.get(\"Content-Length\"))\n\n rlogs = session.query(log.ResourceLog).all()\n assert len(rlogs) == 6 # 6 since /currentuser is called to check whether logged in\n\n rl = rlogs[-1]\n assert statuscode == 401 # User doesn't exist\n assert rl.remote_addr == remote_addr\n assert rl.usersession_id == usersession_id\n assert rl.method == \"POST\"\n assert rl.resource == \"/api/v1/users/actions/login/\"\n assert rl.statuscode == statuscode\n assert rl.response_size == response_size\n assert rl.payload is None\n assert rl.payload_size == 0\n assert rl.query == \"\"\n assert rl.duration > 0\n assert isinstance(rl.time, datetime.datetime)\n\n # Test logging when not logged in\n payload_data = {\n \"allele_ids\": [1],\n \"gp_name\": \"HBOCUTV\",\n \"gp_version\": \"v01\",\n \"referenceassessments\": [],\n }\n client.logout()\n r = client.post(\"/api/v1/acmg/alleles/?dummy=data\", payload_data, username=None)\n payload = json.dumps(payload_data)\n payload_size = len(payload)\n statuscode = r.status_code\n response_size = int(r.headers.get(\"Content-Length\"))\n\n rlogs = session.query(log.ResourceLog).all()\n assert len(rlogs) == 9 # logout counts as 1\n\n rl = rlogs[-1]\n assert statuscode == 403\n assert rl.remote_addr == remote_addr\n assert rl.usersession_id is None\n assert rl.method == \"POST\"\n assert rl.resource == \"/api/v1/acmg/alleles/\"\n assert rl.statuscode == statuscode\n assert rl.response_size == response_size\n assert rl.payload == payload\n assert rl.payload_size == payload_size\n assert rl.query == \"dummy=data\"\n assert isinstance(rl.time, datetime.datetime)", "def upgrade_to_21():\n\n def update_project_template(template):\n new_template = {'acquisitions': []}\n for a in template.get('acquisitions', []):\n new_a = {'minimum': a['minimum']}\n properties = a['schema']['properties']\n if 'measurement' in properties:\n m_req = properties['measurement']['pattern']\n m_req = re.sub('^\\(\\?i\\)', '', m_req)\n new_a['files']=[{'measurement': m_req, 'minimum': 1}]\n if 'label' in properties:\n l_req = properties['label']['pattern']\n l_req = re.sub('^\\(\\?i\\)', '', l_req)\n new_a['label'] = l_req\n new_template['acquisitions'].append(new_a)\n\n return new_template\n\n def dm_v2_updates(cont_list, cont_name):\n for container in cont_list:\n\n query = {'_id': container['_id']}\n update = {'$rename': {'metadata': 'info'}}\n\n if cont_name == 'projects' and container.get('template'):\n new_template = update_project_template(json.loads(container.get('template')))\n update['$set'] = {'template': new_template}\n\n\n if cont_name == 'sessions':\n update['$rename'].update({'subject.metadata': 'subject.info'})\n\n\n measurement = None\n modality = None\n info = None\n if cont_name == 'acquisitions':\n update['$unset'] = {'instrument': '', 'measurement': ''}\n measurement = container.get('measurement', None)\n modality = container.get('instrument', None)\n info = container.get('metadata', None)\n if info:\n config.db.acquisitions.update_one(query, {'$set': {'metadata': {}}})\n\n\n # From mongo docs: '$rename does not work if these fields are in array elements.'\n files = container.get('files')\n if files is not None:\n updated_files = []\n for file_ in files:\n file_['info'] = {}\n if 'metadata' in file_:\n file_['info'] = file_.pop('metadata', None)\n if 'instrument' in 
file_:\n file_['modality'] = file_.pop('instrument', None)\n if measurement:\n # Move the acquisition's measurement to all files\n if file_.get('measurements'):\n file_['measurements'].append(measurement)\n else:\n file_['measurements'] = [measurement]\n if info and file_.get('type', '') == 'dicom':\n # This is going to be the dicom header info\n updated_info = info\n updated_info.update(file_['info'])\n file_['info'] = updated_info\n if modality and not file_.get('modality'):\n file_['modality'] = modality\n\n updated_files.append(file_)\n if update.get('$set'):\n update['$set']['files'] = updated_files\n else:\n update['$set'] = {'files': updated_files}\n\n result = config.db[cont_name].update_one(query, update)\n\n query = {'$or':[{'files.metadata': { '$exists': True}},\n {'metadata': { '$exists': True}},\n {'files.instrument': { '$exists': True}}]}\n\n dm_v2_updates(config.db.collections.find(query), 'collections')\n\n query['$or'].append({'template': { '$exists': True}})\n dm_v2_updates(config.db.projects.find({}), 'projects')\n\n query['$or'].append({'subject': { '$exists': True}})\n dm_v2_updates(config.db.sessions.find(query), 'sessions')\n\n query['$or'].append({'instrument': { '$exists': True}})\n query['$or'].append({'measurement': { '$exists': True}})\n dm_v2_updates(config.db.acquisitions.find(query), 'acquisitions')", "def allow_map_to_audit(self):\n return self.audit_id is None and self.audit is None", "def search_log(doc_type, query):\n if query:\n body = {\n \"from\": 0,\n \"size\": 50,\n \"sort\": [\n {\n \"created_at\": {\n \"order\": \"desc\"\n }\n }\n ],\n \"query\": {\n \"term\": {\n \"_all\": \"the\"\n }\n }\n }\n else:\n body = {\n \"from\": 0,\n \"size\": 50,\n \"sort\": [\n {\n \"created_at\": {\n \"order\": \"desc\"\n }\n }\n ]\n }\n if doc_type:\n print \"condition 1 true\"\n res = es.search(index=\"logs\", doc_type=str(doc_type).strip(), body=body)\n else:\n res = es.search(index=\"logs\", body=body)\n\n data = []\n if not res.get('timed_out'):\n for item in res[\"hits\"][\"hits\"]:\n data.append({\n 'client_ip': item['_source'].get('client_ip'),\n 'client': item['_source'].get('client'),\n 'log': item['_source'].get('log'),\n 'service': item['_source'].get('service'),\n })\n response = {\"data\": data}\n return response", "def test_log_custom_logger(caplog, test_df):\n caplog.clear()\n\n logger_name = \"my_custom_logger\"\n\n my_logger = logging.getLogger(logger_name)\n\n @log_step(print_fn=my_logger.info)\n def do_nothing(df, *args, **kwargs):\n return df\n\n with caplog.at_level(logging.INFO):\n test_df.pipe(do_nothing)\n\n assert logger_name in caplog.text", "def test_get(self, init_db, audit):\n assert Audit.get(audit.id) == audit", "async def modlog(self, ctx):\r\n if ctx.invoked_subcommand is None:\r\n entity=self.bot.get_command(\"modlog\")\r\n p = await HelpPaginator.from_command(ctx, entity)\r\n await p.paginate()\r\n server = ctx.guild\r\n if str(server.id) not in self._logs:\r\n self._logs[str(server.id)] = {}\r\n dataIO.save_json(self._logs_file, self._logs)\r\n if \"channel\" not in self._logs[str(server.id)]:\r\n self._logs[str(server.id)][\"channel\"] = None\r\n dataIO.save_json(self._logs_file, self._logs)\r\n if \"toggle\" not in self._logs[str(server.id)]:\r\n self._logs[str(server.id)][\"toggle\"] = False\r\n dataIO.save_json(self._logs_file, self._logs)\r\n if \"case#\" not in self._logs[str(server.id)]:\r\n self._logs[str(server.id)][\"case#\"] = 0\r\n dataIO.save_json(self._logs_file, self._logs)\r\n if \"case\" not in 
self._logs[str(server.id)]:\r\n self._logs[str(server.id)][\"case\"] = {}\r\n dataIO.save_json(self._logs_file, self._logs)\r\n else:\r\n server = ctx.guild\r\n if str(server.id) not in self._logs:\r\n self._logs[str(server.id)] = {}\r\n dataIO.save_json(self._logs_file, self._logs)\r\n if \"channel\" not in self._logs[str(server.id)]:\r\n self._logs[str(server.id)][\"channel\"] = None\r\n dataIO.save_json(self._logs_file, self._logs)\r\n if \"toggle\" not in self._logs[str(server.id)]:\r\n self._logs[str(server.id)][\"toggle\"] = False\r\n dataIO.save_json(self._logs_file, self._logs)\r\n if \"case#\" not in self._logs[str(server.id)]:\r\n self._logs[str(server.id)][\"case#\"] = 0\r\n dataIO.save_json(self._logs_file, self._logs)\r\n if \"case\" not in self._logs[str(server.id)]:\r\n self._logs[str(server.id)][\"case\"] = {}\r\n dataIO.save_json(self._logs_file, self._logs)", "def test_Drive_log_count(opencl_env: cldrive_env.OpenCLEnvironment):\n logs = opencl_kernel_driver.Drive(\n \"\"\"\nkernel void A(global int* a, global int* b) {\n a[get_global_id(0)] += b[get_global_id(0)];\n}\n\"\"\",\n 16,\n 16,\n opencl_env,\n 5,\n )\n assert len(logs) == 5", "def on_a(self):\r\n self.log()", "def record(params, git_info = {}):\n print \"recording...\"\n\n try:\n # connect to MongoDB\n # config = json.load(open(os.environ.get('HOME') + \"/sandbox/config.json\"))\n config = json.load(open(os.environ.get('HOME') + \"/LSEMS/config.json\"))\n try:\n client = MongoClient(config[\"mongodb_url\"])\n except Exception as e:\n raise Exception(\"fail to connect to given MongoDB address: \" + DB_addr)\n\n # check and run the thing\n missing = checkKeys(params, ['data_set', 'src', 'type', 'param'])\n if len(missing) != 0:\n raise Exception(\"missing attribute\"+('s' if len(missing)!=1 else '')+\": \"+str(missing))\n\n params['time'] = asctime()\n params['commit_id'] = git_info['commit_id']\n params['name'] = git_info['name']\n repo_name = git_info['repo_name']\n params['repo_name'] = repo_name\n user = verifyUser(client, git_info['name'])\n\n exp = user.find_one({'exp_name': repo_name})\n if not exp:\n print 'adding new experiment '+repo_name+'...'\n user.insert({'exp_name': repo_name, 'exp_records':[]})\n old_records = user.find_one({'exp_name': repo_name})['exp_records']\n user.update({'exp_name': repo_name}, {'$set': {'exp_records': old_records + [params]}})\n\n print params\n #user.insert(params)\n client.close()\n return True,params\n except Exception as e:\n print e\n print \"Aborting...\"\n return False,{}", "def setup_logfile():\r\n from core.general.appinit import log_init\r\n log_init(\r\n 'general',\r\n 'django_api'\r\n )", "def add_audit_cols(df, changedt):\n df = df.withColumn(\"operation\", f.lit(\"I\")) \\\n .withColumn(\"processeddate\", f.current_timestamp().cast(\"String\")) \\\n .withColumn(\"changedate\", f.lit(changedt)) \\\n .withColumn('changedate_year', f.year('changedate').cast(\"String\")) \\\n .withColumn('changedate_month', f.month('changedate').cast(\"String\")) \\\n .withColumn('changedate_day', f.dayofmonth('changedate').cast(\"String\"))\n return df", "def command(ctx):\n ctx.setup_logger(format='')", "def update_audit_info(progress_controller=None):\n if progress_controller is None:\n progress_controller = ProgressControllerBase()\n progress_controller.maximum = 2\n\n from stalker.db.session import DBSession\n from stalker import LocalSession\n\n with DBSession.no_autoflush:\n local_session = LocalSession()\n logged_in_user = local_session.logged_in_user\n 
progress_controller.increment()\n\n if logged_in_user:\n # update the version updated_by\n from anima.dcc import mayaEnv\n\n m_env = mayaEnv.Maya()\n v = m_env.get_current_version()\n if v:\n v.updated_by = logged_in_user\n\n from stalker.db.session import DBSession\n\n DBSession.commit()\n progress_controller.increment()\n progress_controller.complete()", "def test_sudoers_audit(host):\n with host.sudo():\n sudoers_access = host.run(\"touch /etc/sudoers\")\n audit_log = host.run(\"journalctl -u auditd --since \\\"10 seconds ago\\\" | grep \\\"/etc/sudoers\\\"\")\n assert audit_log.stdout", "def sync_volumeaccessright_record( vac ):\n \n syndicate_caps = \"UNKNOWN\" # for exception handling\n \n # get arguments\n config = observer_core.get_config()\n principal_id = vac.owner_id.email\n volume_name = vac.volume.name\n syndicate_caps = observer_core.opencloud_caps_to_syndicate_caps( vac.cap_read_data, vac.cap_write_data, vac.cap_host_data ) \n \n logger.info( \"Sync VolumeAccessRight for (%s, %s)\" % (principal_id, volume_name) )\n \n # validate config\n try:\n observer_secret = observer_core.get_syndicate_observer_secret( config.SYNDICATE_OBSERVER_SECRET )\n except Exception, e:\n traceback.print_exc()\n logger.error(\"syndicatelib config is missing SYNDICATE_RG_DEFAULT_PORT, SYNDICATE_OBSERVER_SECRET\")\n raise e\n \n # ensure the user exists and has credentials\n try:\n rc, user = observer_core.ensure_principal_exists( principal_id, observer_secret, is_admin=False, max_UGs=1100, max_RGs=1 )\n assert rc is True, \"Failed to ensure principal %s exists (rc = %s,%s)\" % (principal_id, rc, user)\n except Exception, e:\n traceback.print_exc()\n logger.error(\"Failed to ensure user '%s' exists\" % principal_id )\n raise e\n\n # grant the slice-owning user the ability to provision UGs in this Volume\n try:\n rc = observer_core.ensure_volume_access_right_exists( principal_id, volume_name, syndicate_caps )\n assert rc is True, \"Failed to set up Volume access right for slice %s in %s\" % (principal_id, volume_name)\n \n except Exception, e:\n traceback.print_exc()\n logger.error(\"Failed to set up Volume access right for slice %s in %s\" % (principal_id, volume_name))\n raise e\n \n except Exception, e:\n traceback.print_exc()\n logger.error(\"Faoed to ensure user %s can access Volume %s with rights %s\" % (principal_id, volume_name, syndicate_caps))\n raise e\n\n return True", "def test_log_extra_custom_logger(caplog, test_df):\n caplog.clear()\n\n logger_name = \"my_custom_logger\"\n\n my_logger = logging.getLogger(logger_name)\n\n @log_step_extra(len, print_fn=my_logger.info)\n def do_nothing(df, *args, **kwargs):\n return df\n\n with caplog.at_level(logging.INFO):\n test_df.pipe(do_nothing)\n\n assert logger_name in caplog.text", "def __init__(__self__, *,\n copy_log_details_type: str,\n disk_serial_number: str,\n error_log_link: str,\n verbose_log_link: str):\n pulumi.set(__self__, \"copy_log_details_type\", 'DataBoxDisk')\n pulumi.set(__self__, \"disk_serial_number\", disk_serial_number)\n pulumi.set(__self__, \"error_log_link\", error_log_link)\n pulumi.set(__self__, \"verbose_log_link\", verbose_log_link)", "def log_all(self):\n self.save_raw()\n self.log()", "def upgrade_to_15():\n query = {}\n query['$or'] = [\n {'timestamp':''},\n {'$and': [\n {'timestamp': {'$exists': True}},\n {'timestamp': {'$not': {'$type':2}}},\n {'timestamp': {'$not': {'$type':9}}}\n ]}\n ]\n unset = {'$unset': {'timestamp': ''}}\n\n config.db.sessions.update_many(query, unset)\n 
config.db.acquisitions.update_many(query, unset)\n\n query = {'$and': [\n {'timestamp': {'$exists': True}},\n {'timestamp': {'$type':2}}\n ]}\n sessions = config.db.sessions.find(query)\n for s in sessions:\n try:\n fixed_timestamp = dateutil.parser.parse(s['timestamp'])\n except:\n config.db.sessions.update_one({'_id': s['_id']}, {'$unset': {'timestamp': ''}})\n continue\n config.db.sessions.update_one({'_id': s['_id']}, {'$set': {'timestamp': fixed_timestamp}})\n\n acquisitions = config.db.acquisitions.find(query)\n for a in acquisitions:\n try:\n fixed_timestamp = dateutil.parser.parse(a['timestamp'])\n except:\n config.db.sessions.update_one({'_id': a['_id']}, {'$unset': {'timestamp': ''}})\n continue\n config.db.sessions.update_one({'_id': a['_id']}, {'$set': {'timestamp': fixed_timestamp}})", "def clb_access_logging_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:\n elb = session.client(\"elb\")\n # ISO Time\n iso8601Time = (datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat())\n for lb in describe_clbs(cache, session):\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(lb,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n clbName = lb[\"LoadBalancerName\"]\n clbArn = f\"arn:{awsPartition}:elasticloadbalancing:{awsRegion}:{awsAccountId}:loadbalancer/{clbName}\"\n dnsName = lb[\"DNSName\"]\n lbSgs = lb[\"SecurityGroups\"]\n lbSubnets = lb[\"Subnets\"]\n lbAzs = lb[\"AvailabilityZones\"]\n lbVpc = lb[\"VPCId\"]\n clbScheme = lb[\"Scheme\"]\n # Get Attrs\n if elb.describe_load_balancer_attributes(LoadBalancerName=clbName)[\"LoadBalancerAttributes\"][\"AccessLog\"][\"Enabled\"] is False:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": clbArn + \"/classic-loadbalancer-access-logging-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": clbArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks/AWS Security Best Practices\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"MEDIUM\"},\n \"Confidence\": 99,\n \"Title\": \"[ELB.5] Classic load balancers should enable access logging\",\n \"Description\": \"Classic load balancer \"\n + clbName\n + \" does not have access logging enabled. 
Refer to the remediation instructions to remediate this behavior\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on access logging refer to the Access Logs for Your Classic Load Balancer section of the Classic Load Balancers User Guide.\",\n \"Url\": \"https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/access-log-collection.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Networking\",\n \"AssetService\": \"AWS Elastic Load Balancer\",\n \"AssetComponent\": \"Classic Load Balancer\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsElbLoadBalancer\",\n \"Id\": clbArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsElbLoadBalancer\": {\n \"DnsName\": dnsName,\n \"Scheme\": clbScheme,\n \"SecurityGroups\": lbSgs,\n \"Subnets\": lbSubnets,\n \"VpcId\": lbVpc,\n \"AvailabilityZones\": lbAzs,\n \"LoadBalancerName\": clbName\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 ID.AM-3\",\n \"NIST CSF V1.1 DE.AE-1\",\n \"NIST CSF V1.1 DE.AE-3\",\n \"NIST CSF V1.1 DE.CM-1\",\n \"NIST CSF V1.1 DE.CM-7\",\n \"NIST CSF V1.1 PR.PT-1\",\n \"NIST SP 800-53 Rev. 4 AC-2\",\n \"NIST SP 800-53 Rev. 4 AC-4\",\n \"NIST SP 800-53 Rev. 4 AU-6\",\n \"NIST SP 800-53 Rev. 4 AU-12\",\n \"NIST SP 800-53 Rev. 4 CA-3\",\n \"NIST SP 800-53 Rev. 4 CA-7\",\n \"NIST SP 800-53 Rev. 4 CA-9\",\n \"NIST SP 800-53 Rev. 4 CM-2\",\n \"NIST SP 800-53 Rev. 4 CM-3\",\n \"NIST SP 800-53 Rev. 4 CM-8\",\n \"NIST SP 800-53 Rev. 4 IR-4\",\n \"NIST SP 800-53 Rev. 4 IR-5\",\n \"NIST SP 800-53 Rev. 4 IR-8\",\n \"NIST SP 800-53 Rev. 4 PE-3\",\n \"NIST SP 800-53 Rev. 4 PE-6\",\n \"NIST SP 800-53 Rev. 4 PE-20\",\n \"NIST SP 800-53 Rev. 4 PL-8\",\n \"NIST SP 800-53 Rev. 4 SC-5\",\n \"NIST SP 800-53 Rev. 4 SC-7\",\n \"NIST SP 800-53 Rev. 
4 SI-4\",\n \"AICPA TSC CC3.2\",\n \"AICPA TSC CC6.1\",\n \"AICPA TSC CC7.2\",\n \"ISO 27001:2013 A.12.1.1\",\n \"ISO 27001:2013 A.12.1.2\",\n \"ISO 27001:2013 A.12.4.1\",\n \"ISO 27001:2013 A.12.4.2\",\n \"ISO 27001:2013 A.12.4.3\",\n \"ISO 27001:2013 A.12.4.4\",\n \"ISO 27001:2013 A.12.7.1\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.2.1\",\n \"ISO 27001:2013 A.13.2.2\",\n \"ISO 27001:2013 A.14.2.7\",\n \"ISO 27001:2013 A.15.2.1\",\n \"ISO 27001:2013 A.16.1.7\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": clbArn + \"/classic-loadbalancer-access-logging-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": clbArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks/AWS Security Best Practices\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[ELB.5] Classic load balancers should enable access logging\",\n \"Description\": \"Classic load balancer \"\n + clbName\n + \" does not have access logging enabled.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on access logging refer to the Access Logs for Your Classic Load Balancer section of the Classic Load Balancers User Guide.\",\n \"Url\": \"https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/access-log-collection.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Networking\",\n \"AssetService\": \"AWS Elastic Load Balancer\",\n \"AssetComponent\": \"Classic Load Balancer\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsElbLoadBalancer\",\n \"Id\": clbArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsElbLoadBalancer\": {\n \"DnsName\": dnsName,\n \"Scheme\": clbScheme,\n \"SecurityGroups\": lbSgs,\n \"Subnets\": lbSubnets,\n \"VpcId\": lbVpc,\n \"AvailabilityZones\": lbAzs,\n \"LoadBalancerName\": clbName\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 ID.AM-3\",\n \"NIST CSF V1.1 DE.AE-1\",\n \"NIST CSF V1.1 DE.AE-3\",\n \"NIST CSF V1.1 DE.CM-1\",\n \"NIST CSF V1.1 DE.CM-7\",\n \"NIST CSF V1.1 PR.PT-1\",\n \"NIST SP 800-53 Rev. 4 AC-2\",\n \"NIST SP 800-53 Rev. 4 AC-4\",\n \"NIST SP 800-53 Rev. 4 AU-6\",\n \"NIST SP 800-53 Rev. 4 AU-12\",\n \"NIST SP 800-53 Rev. 4 CA-3\",\n \"NIST SP 800-53 Rev. 4 CA-7\",\n \"NIST SP 800-53 Rev. 4 CA-9\",\n \"NIST SP 800-53 Rev. 4 CM-2\",\n \"NIST SP 800-53 Rev. 4 CM-3\",\n \"NIST SP 800-53 Rev. 4 CM-8\",\n \"NIST SP 800-53 Rev. 4 IR-4\",\n \"NIST SP 800-53 Rev. 4 IR-5\",\n \"NIST SP 800-53 Rev. 4 IR-8\",\n \"NIST SP 800-53 Rev. 4 PE-3\",\n \"NIST SP 800-53 Rev. 4 PE-6\",\n \"NIST SP 800-53 Rev. 4 PE-20\",\n \"NIST SP 800-53 Rev. 4 PL-8\",\n \"NIST SP 800-53 Rev. 4 SC-5\",\n \"NIST SP 800-53 Rev. 4 SC-7\",\n \"NIST SP 800-53 Rev. 
4 SI-4\",\n \"AICPA TSC CC3.2\",\n \"AICPA TSC CC6.1\",\n \"AICPA TSC CC7.2\",\n \"ISO 27001:2013 A.12.1.1\",\n \"ISO 27001:2013 A.12.1.2\",\n \"ISO 27001:2013 A.12.4.1\",\n \"ISO 27001:2013 A.12.4.2\",\n \"ISO 27001:2013 A.12.4.3\",\n \"ISO 27001:2013 A.12.4.4\",\n \"ISO 27001:2013 A.12.7.1\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.2.1\",\n \"ISO 27001:2013 A.13.2.2\",\n \"ISO 27001:2013 A.14.2.7\",\n \"ISO 27001:2013 A.15.2.1\",\n \"ISO 27001:2013 A.16.1.7\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding", "def log_access():\n # todo use project prefix\n tail('/var/log/nginx/access.log')", "def test_dump_collection(self):\n\n test_oplog, primary_conn, search_ts = self.get_oplog_thread()\n solr = DocManager()\n test_oplog.doc_manager = solr\n\n #with documents\n primary_conn['test']['test'].insert({'name': 'paulie'})\n search_ts = test_oplog.get_last_oplog_timestamp()\n test_oplog.dump_collection()\n\n test_oplog.doc_manager.commit()\n solr_results = solr._search()\n self.assertEqual(len(solr_results), 1)\n solr_doc = solr_results[0]\n self.assertEqual(long_to_bson_ts(solr_doc['_ts']), search_ts)\n self.assertEqual(solr_doc['name'], 'paulie')\n self.assertEqual(solr_doc['ns'], 'test.test')", "def save_to_mongo(self, collection='pending_trailers'):\n Database.insert(collection=collection, data=self.json())", "def test_post_add_log_event(self):\n pass", "def enable_audit_logging(f):\n\n @wraps(f)\n def wrapper(*args, **kwargs):\n def create_audit_log_for_request_decorator(response):\n return create_audit_log_for_request(response)\n\n if is_audit_enabled():\n # we can't add the `after_this_request` and\n # `create_audit_log_for_request_decorator` decorators to the\n # functions directly, because `is_audit_enabled` depends on\n # the config being loaded\n flask.after_this_request(create_audit_log_for_request_decorator)\n return f(*args, **kwargs)\n\n return wrapper", "def InsertLog():", "def load_landkreis_information():\n client = MongoClient(f'mongodb://{os.getenv(\"USR_\")}:{os.getenv(\"PWD_\")}@{os.getenv(\"REMOTE_HOST\")}:{os.getenv(\"REMOTE_PORT\")}/{os.getenv(\"AUTH_DB\")}')\n db = client[os.getenv(\"MAIN_DB\")]\n lk_collection = db[\"lk_overview\"]\n data = pd.DataFrame(list(lk_collection.find()))\n return data", "def test_enable_local_caching(sdc_builder, sdc_executor, stage_attributes, cluster):\n\n dir_path = os.path.join(tempfile.gettempdir(), get_random_string(string.ascii_lowercase))\n table_name = get_random_string(string.ascii_lowercase, 10)\n\n builder = sdc_builder.get_pipeline_builder()\n directory = builder.add_stage('Directory', type='origin')\n directory.data_format = 'JSON'\n directory.json_content = 'MULTIPLE_OBJECTS'\n directory.files_directory = dir_path\n directory.file_name_pattern = '*.json'\n directory.file_name_pattern_mode = 'GLOB'\n\n kudu = builder.add_stage('Kudu Lookup')\n kudu.kudu_table_name = f'impala::default.{table_name}'\n kudu.key_columns_mapping = [dict(field='/f1', columnName='id')]\n kudu.column_to_output_field_mapping = [dict(field='/d1', columnName='name', defaultValue='no_name')]\n kudu.missing_lookup_behavior = 'PASS_RECORD_ON'\n kudu.enable_table_caching = True\n kudu.eviction_policy_type = 'EXPIRE_AFTER_WRITE'\n kudu.expiration_time = 1\n kudu.time_unit = 'HOURS'\n kudu.set_attributes(**stage_attributes)\n\n wiretap = builder.add_wiretap()\n\n directory >> kudu >> wiretap.destination\n\n pipeline = builder.build().configure_for_environment(cluster)\n\n 
sdc_executor.add_pipeline(pipeline)\n\n metadata = sqlalchemy.MetaData()\n table = sqlalchemy.Table(table_name,\n metadata,\n sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),\n sqlalchemy.Column('name', sqlalchemy.String),\n impala_partition_by='HASH PARTITIONS 16',\n impala_stored_as='KUDU',\n impala_table_properties={\n 'kudu.master_addresses': f'{cluster.server_host}:{DEFAULT_KUDU_PORT}',\n 'kudu.num_tablet_replicas': '1'\n })\n\n engine = cluster.kudu.engine\n table.create(engine)\n\n try:\n sdc_executor.execute_shell(f'mkdir -p {dir_path}')\n sdc_executor.write_file(os.path.join(dir_path, 'a.json'), json.dumps({\"f1\": 1, \"d1\": \"old_name1\"}))\n\n conn = engine.connect()\n conn.execute(table.insert(), [{'id': 1, 'name': 'name1'}])\n\n status = sdc_executor.start_pipeline(pipeline)\n status.wait_for_pipeline_batch_count(2)\n\n conn.execute(table.update().where(table.c.id == 1).values(name='name2'))\n\n sdc_executor.write_file(os.path.join(dir_path, 'b.json'), json.dumps({\"f1\": 1, \"d1\": \"old_name2\"}))\n\n status.wait_for_pipeline_batch_count(4)\n\n output_records = [record.field for record in wiretap.output_records]\n\n if stage_attributes['enable_local_caching']:\n assert [{'f1': 1, 'd1': 'name1'}, {'f1': 1, 'd1': 'name1'}] == output_records\n else:\n assert [{'f1': 1, 'd1': 'name1'}, {'f1': 1, 'd1': 'name2'}] == output_records\n\n finally:\n try:\n sdc_executor.stop_pipeline(pipeline)\n finally:\n table.drop(engine)\n sdc_executor.execute_shell(f'rm -fr {dir_path}')", "def test_last_used(self, monkeypatch):\n monkeypatch.setenv('ENABLE_AUTO_EXPIRE', 'true')\n monkeypatch.setenv('INACTIVITY_AGE', '10')\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('user3', 'kljin', 'Active', created, last_used)\n key.audit(10, 11, 2, 1)\n assert key.audit_state == 'expire'\n key.audit(60, 80, 2, 1)\n assert key.audit_state == 'stagnant_expire'", "def GetLogs(self):\n raise NotImplementedError()", "def inspect_storage_objects_for_debugging(k8s_ctx: str, dry_run: bool = False):\n cmd = f'kubectl --context={k8s_ctx} get pv,pvc -o=NAME'\n for storage_obj in run_commands([cmd], dry_run):\n cmd = f'kubectl --context={k8s_ctx} describe {storage_obj}'\n if dry_run:\n logging.debug(cmd)\n else:\n p = safe_exec(cmd)\n if p.stdout:\n for line in p.stdout.decode().split('\\n'):\n if line.startswith(\"Status\") or line.startswith(\"Finalizers\"):\n logging.debug(f'{storage_obj} {line}')", "def log_model_metadata(model_uid, schema, db_conn):\n df = pd.DataFrame({\n 'training_timestamp': [get_current_timestamp()],\n 'model_uid': [model_uid]\n })\n df.to_sql(name='model_metadata', schema=schema, con=db_conn, if_exists='append', index=False)", "def logtool(self, action, **options):\n pass", "def describe_mongo_dblog_config(\n self,\n request: dds_20151201_models.DescribeMongoDBLogConfigRequest,\n ) -> dds_20151201_models.DescribeMongoDBLogConfigResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_mongo_dblog_config_with_options(request, runtime)", "def update_logs(event, log, action_log, error_log):\n\tif event[\"type\"] == \"error\":\n\t\t#Update the error log file\n\telse:\n\t\t# event[\"type\"] == \"action\"\n\t\t#Update action file", "def ocsaudit_rest_log_command(method, url, url_args, username):\n \n try:\n if method == \"GET\":\n type = cmd_type.type_get\n elif method == \"POST\":\n type = cmd_type.type_post\n elif method == \"PATCH\":\n type = 
cmd_type.type_patch\n elif method == \"DELETE\":\n type = cmd_type.type_delete\n else:\n type = cmd_type.type_unknown\n print \"Unidentified command type {0}\".format(method)\n \n url = url.split(\"/v1/\",1)[1]\n args = \" \".join(url_args)\n \n ocsaudit_log_command(username, type, cmd_interface.interface_rest, \n url, args)\n except Exception as e:\n print \"ocsaudit_rest_log_command Exception {0}\".format(e)", "def audit(cls):\n old_save = cls.save\n old_delete = cls.delete\n def save(self, *arg, **kw):\n from middleware import get_current_user\n user = get_current_user()\n if user is not None:\n self.last_user_id = user.id\n return old_save(self, *arg, **kw)\n\n\n def delete(self, *arg, **kw):\n from middleware import get_current_user\n user = get_current_user()\n if user is not None:\n self.last_user_id = user.id\n cls.save(self)\n return old_delete(self, *arg, **kw)\n cls.save = save\n cls.delete = delete\n cls.last_user_id = models.IntegerField(null=True, blank=True, editable=False)\n return cls", "def logentry(jobid, label, typ, content=None, path=None):\n ud = str(uuid.uuid4())\n db.logs.save({\"uuid\":ud, \"jobid\":jobid, \"label\":label, \"type\":typ, \"content\":content, \"date\":tstamp(), \"containerurl\":path})", "def describe_mongo_dblog_config_with_options(\n self,\n request: dds_20151201_models.DescribeMongoDBLogConfigRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeMongoDBLogConfigResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeMongoDBLogConfig',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeMongoDBLogConfigResponse(),\n self.call_api(params, req, runtime)\n )" ]
[ "0.58979994", "0.5785542", "0.5478132", "0.53298086", "0.5068395", "0.50443417", "0.5012571", "0.49672645", "0.4954462", "0.49442938", "0.4924835", "0.4924835", "0.49021187", "0.48921368", "0.4870133", "0.48647827", "0.48227146", "0.48181987", "0.48176298", "0.48098782", "0.47750485", "0.47671366", "0.47184798", "0.4691414", "0.46824726", "0.46613085", "0.46508464", "0.4642479", "0.46275452", "0.46005014", "0.45852172", "0.45844045", "0.45633274", "0.4548719", "0.4534788", "0.4529743", "0.45182583", "0.45153734", "0.45137933", "0.45098406", "0.44932467", "0.4490264", "0.44781154", "0.4457276", "0.44572684", "0.444827", "0.44429126", "0.4441962", "0.4423033", "0.44219932", "0.4410732", "0.44020614", "0.43944114", "0.43860224", "0.43800586", "0.43767482", "0.43710747", "0.43692446", "0.4367495", "0.43638027", "0.43597147", "0.43487754", "0.43479532", "0.43392855", "0.43373746", "0.43354854", "0.43345004", "0.43285573", "0.43275705", "0.4321296", "0.43160042", "0.43157604", "0.43135425", "0.4310339", "0.43065983", "0.43041477", "0.4301492", "0.43011114", "0.4298716", "0.42984465", "0.42954382", "0.42836523", "0.42769638", "0.42750305", "0.42724177", "0.42662865", "0.42653164", "0.42632982", "0.4261758", "0.42613357", "0.4259449", "0.42581794", "0.4257313", "0.4255564", "0.4255154", "0.42539102", "0.4253152", "0.42520702", "0.4248894", "0.4246904", "0.42352083" ]
0.0
-1
This operation is applicable only to general-purpose local-disk and dedicated local-disk instances. This operation depends on the audit log feature of ApsaraDB for MongoDB. You can enable the audit log feature based on your business needs. For more information, see [Enable the audit log feature](~~59903~~). Starting from January 6, 2022, the official edition of the audit log feature has been launched in all regions, and new applications for the free trial edition have ended. For more information, see [Notice on official launch of the pay-as-you-go audit log feature and no more applications for the free trial edition](~~377480~~).
def describe_mongo_dblog_config(
    self,
    request: dds_20151201_models.DescribeMongoDBLogConfigRequest,
) -> dds_20151201_models.DescribeMongoDBLogConfigResponse:
    runtime = util_models.RuntimeOptions()
    return self.describe_mongo_dblog_config_with_options(request, runtime)
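A minimal usage sketch for the wrapper above (not part of the dataset record): it assumes the standard Alibaba Cloud Python SDK packaging (`alibabacloud_dds20151201`, `alibabacloud_tea_openapi`); the endpoint, credentials, and instance ID are placeholder assumptions, not values taken from this document.

# Hypothetical caller sketch; package layout and placeholder values are assumptions.
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_dds20151201.client import Client

# Build a client config with placeholder credentials and a regional endpoint.
config = open_api_models.Config(
    access_key_id='<your-access-key-id>',
    access_key_secret='<your-access-key-secret>',
)
config.endpoint = 'mongodb.aliyuncs.com'  # assumed endpoint; adjust per region
client = Client(config)

# DBInstanceId is the only required business parameter per the query map above.
request = dds_20151201_models.DescribeMongoDBLogConfigRequest(
    dbinstance_id='dds-bp1xxxxxxxxxxxxx',  # placeholder instance ID
)
response = client.describe_mongo_dblog_config(request)
print(response.body)  # audit log configuration of the instance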
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def enable_audit_monitoring():\n __enable_data_access_logging()\n __enable_log_streaming()\n __create_audit_alerts()\n __get_incidents_history()", "def __enable_data_access_logging():\n _tempFile = \"tmp_audit_config.json\"\n\n auditConfig = {\n \"auditConfigs\": [\n {\n \"auditLogConfigs\": [\n {\n \"logType\": \"ADMIN_READ\"\n },\n {\n \"logType\": \"DATA_WRITE\"\n },\n {\n \"logType\": \"DATA_READ\"\n }\n ],\n \"service\": \"allServices\",\n }\n ]\n }\n\n # get current policy\n run_command('gcloud projects get-iam-policy {} --format=json >>{}'.format(PROJECT_ID, _tempFile))\n\n # merge it with above-defined config\n merge_JSON(auditConfig, _tempFile)\n\n # set the policy\n run_command('gcloud projects set-iam-policy {} {}'.format(PROJECT_ID, _tempFile))\n\n # delete the temp file\n run_command('rm {}'.format(_tempFile))", "def add_audit(self, entity_name, object_name, operation,\n data, auth_ctx, session):", "def test_mongodb_oplog_origin(sdc_builder, sdc_executor, mongodb):\n pipeline_builder = sdc_builder.get_pipeline_builder()\n pipeline_builder.add_error_stage('Discard')\n\n time_now = int(time.time())\n mongodb_oplog = pipeline_builder.add_stage('MongoDB Oplog')\n database_name = get_random_string(ascii_letters, 10)\n # Specify that MongoDB Oplog needs to read changes occuring after time_now.\n mongodb_oplog.set_attributes(collection='oplog.rs', initial_timestamp_in_secs=time_now, initial_ordinal=1)\n\n trash = pipeline_builder.add_stage('Trash')\n mongodb_oplog >> trash\n pipeline = pipeline_builder.build().configure_for_environment(mongodb)\n\n try:\n # Insert documents in MongoDB using PyMongo.\n # First a database is created. Then a collection is created inside that database.\n # Then documents are inserted in that collection.\n mongodb_database = mongodb.engine[database_name]\n mongodb_collection = mongodb_database[get_random_string(ascii_letters, 10)]\n input_rec_count = 6\n inserted_list = mongodb_collection.insert_many([{'x': i} for i in range(input_rec_count)])\n assert len(inserted_list.inserted_ids) == input_rec_count\n\n sdc_executor.add_pipeline(pipeline)\n snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot\n sdc_executor.stop_pipeline(pipeline)\n\n assert len(snapshot[mongodb_oplog].output) == input_rec_count\n for record in list(enumerate(snapshot[mongodb_oplog].output)):\n assert record[1].value['value']['o']['value']['x']['value'] == str(record[0])\n # Verify the operation type is 'i' which is for 'insert' since we inserted the records earlier.\n assert record[1].value['value']['op']['value'] == 'i'\n assert record[1].value['value']['ts']['value']['timestamp']['value'] > time_now\n\n finally:\n logger.info('Dropping %s database...', database_name)\n mongodb.engine.drop_database(database_name)", "def test_otoroshi_controllers_adminapi_events_controller_audit_events(self):\n pass", "def log(cmdDict, kind, error=None):\n\n cmdDict['timestamp'] = str(int(time.time() * 1000))\n cmdDict['logType'] = kind\n cmdDict['_id'] = cmdDict['transactionNumber'] + cmdDict['user'] + cmdDict['command'] + cmdDict['timestamp'] + cmdDict['logType']\n\n if (error != None):\n cmdDict['errorMessage'] = error\n\n try:\n Database.insert(TRANSACT_COLLECT, cmdDict)\n\n except pymongo.errors.PyMongoError as err:\n print(f\"ERROR! Could not log command into database. 
Failed with: {err}\")", "def get_audit(self, query, session):\n raise NotImplementedError()", "def test_old_access(self):\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('username', 'keyid', 'Active', created, last_used)\n key.audit(60, 80, 10, 9)\n assert key.audit_state == 'stagnant_expire'", "def MongoLog(self, request_number, process, log_message):\n try:\n print(\"Attempting to connect to MongoDB...\")\n client = MongoClient('localhost', 27017)\n db = client.database\n collection = db.logging_database\n\n status_log = {\"Request_No\": request_number, \"Brewing_Process\": process, \"Log_Message\": log_message,\n \"Time\": datetime.datetime.now()}\n\n try:\n collection.insert_one(status_log)\n except TypeError: # Error Handling for MongoDB versions that do not implement insert_one() method\n collection.insert(status_log)\n\n print(status_log)\n except Exception as e:\n print(\"MongoDB connection Error:\" + str(e))", "def disable_disk_logging():\r\n app.set_option(_DISK_LOG_LEVEL_OPTION, LogOptions._LOG_LEVEL_NONE_KEY, force=True)", "def view_audit_log(_) -> int:\n return 1 << 7", "def view_audit_log(_) -> int:\n return 1 << 7", "def test_normal(self):\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n k = Key('username', 'keyid', 'Active', created, last_used)\n k.audit(60, 80, 20, 19)\n assert k.creation_age == 15\n assert k.audit_state == 'good'", "def check_oplog(con, warning, critical, perf_data):\n warning = warning or 24 \n critical = critical or 4\n try:\n db = con.local\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) \n if (db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) != None) :\n oplog = \"oplog.rs\";\n else :\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"})\n if (db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"}) != None) :\n oplog = \"oplog.$main\";\n else :\n message = \"neither master/slave nor replica set replication detected\";\n return check_levels(None,warning,critical,message)\n\n try:\n set_read_preference(con.admin)\n data=con.local.command(pymongo.son_manipulator.SON([('collstats',oplog)]))\n except:\n data = con.admin.command(son.SON([('collstats',oplog)]))\n\n ol_size=data['size']\n ol_storage_size=data['storageSize']\n ol_used_storage=int(float(ol_size)/ol_storage_size*100+1)\n ol=con.local[oplog]\n firstc = ol.find().sort(\"$natural\",pymongo.ASCENDING).limit(1)[0]['ts']\n lastc = ol.find().sort(\"$natural\",pymongo.DESCENDING).limit(1)[0]['ts']\n time_in_oplog= (lastc.as_datetime()-firstc.as_datetime())\n message=\"Oplog saves \"+ str(time_in_oplog) + \" %d%% used\" %ol_used_storage \n try: #work starting from python2.7\n hours_in_oplog= time_in_oplog.total_seconds()/60/60\n except:\n hours_in_oplog= float(time_in_oplog.seconds + time_in_oplog.days * 24 * 3600)/60/60\n approx_level=hours_in_oplog*100/ol_used_storage\n message+=performance_data(perf_data,[(\"%.2f\" % hours_in_oplog,'oplog_time',warning,critical),(\"%.2f \" % approx_level, 'oplog_time_100_percent_used')])\n return check_levels(-approx_level,-warning,-critical,message)\n\n except Exception, e:\n return exit_with_general_critical(e)", "def log(msg):\n\n print('datastore: %s' % msg)", "def AddSqlServerAudit(parser):\n parser.add_argument(\n '--audit-bucket-path',\n required=False,\n help=(\n 'The location, as a Cloud Storage 
bucket, to which audit files are '\n 'uploaded. The URI is in the form gs://bucketName/folderName. Only '\n 'available for SQL Server instances.'\n ),\n )\n\n parser.add_argument(\n '--audit-retention-interval',\n default=None,\n type=arg_parsers.Duration(upper_bound='7d'),\n required=False,\n help=(\n 'The number of days for audit log retention on disk, for example, 3d'\n 'for 3 days. Only available for SQL Server instances.'\n ),\n )\n\n parser.add_argument(\n '--audit-upload-interval',\n default=None,\n type=arg_parsers.Duration(upper_bound='720m'),\n required=False,\n help=(\n 'How often to upload audit logs (audit files), for example, 30m'\n 'for 30 minutes. Only available for SQL Server instances.'\n ),\n )", "def setup_tap_mongodb(self):\n db_script = os.path.join(DIR, '..', '..', 'db', 'tap_mongodb.sh')\n self._run_command(db_script)", "def logs(self, container: Container) -> str:", "def add_log4mongo():\n remove_log4mongo()\n root = logging.getLogger()\n root.addHandler(MongoHandler())", "def _add_connection_info(report_kvs, db):\n report_kvs['Flavor'] = 'mongodb'\n report_kvs['Database'] = db.name\n report_kvs['RemoteHost'] = db.connection.host\n report_kvs['RemotePort'] = db.connection.port", "def log_db():\n return pymongo.MongoClient(SCITRAN_PERSISTENT_DB_LOG_URI).get_database()", "def update_log(ident, document):\n logs_col.update_one({\"_id\": ident}, {\"$set\": document})", "def test_getAuditLogsWithNoParams(self):\r\n logs = self.client.getAuditLogs()\r\n return logs", "def test_host_file_audit(host):\n with host.sudo():\n host.run(\"touch /etc/hosts\")\n audit_log = host.run(\"journalctl -u auditd --since \\\"10 seconds ago\\\" | grep \\\"/etc/hosts\\\"\")\n assert audit_log.stdout", "def audit(msg):\n global auditLogger\n if auditLogger is not None:\n auditLogger.info(msg)", "def get_log(ident):\n return logs_col.find_one({\"_id\": ObjectId(ident)})", "def audit(audit_code, audit_str, request, system_initiated=False):\n try:\n db_path, err = config.get_db_path()\n if err:\n raise Exception(err)\n if system_initiated is False:\n ip, err = networking.get_client_ip(request.META)\n if err:\n raise Exception(err)\n now, err = datetime_utils.get_epoch(when='now', num_previous_days=0)\n if err:\n raise Exception(er)\n if system_initiated:\n username = 'System'\n source_ip = 'System'\n else:\n username = request.user.username\n source_ip, err = networking.get_client_ip(request.META)\n if err:\n raise Exception(err)\n command_list = []\n cmd = [\n 'insert into audit(audit_time, username, source_ip, audit_code, audit_str) values (?,?,?,?,?)', (now, username, source_ip, audit_code, audit_str,)]\n command_list.append(cmd)\n audit_id, err = db.execute_iud(db_path, command_list, get_rowid=True)\n if err:\n raise Exception(err)\n ret, err = event_notifications.record_event_notification_holding(\n event_id=audit_id, event_type_id=2)\n if err:\n raise Exception(err)\n\n except Exception, e:\n return False, 'Error performing an audit operation : %s' % str(e)\n else:\n return True, None", "def event_log(self):\n pass", "def audit_log(self, account_id):\n from pureport_client.commands.accounts.audit_log import Command\n return Command(self.client, account_id)", "def disk_log_level():\r\n if LogOptions._DISK_LOG_LEVEL is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_LEVEL", "def upload_logs():\n return {\n 'page': 'upload_logs',\n 'raw_logs': '',\n }", "def test_mongo_logging_client_persists_log():\n\n 
error_message = \"This is a test message.\"\n logger = LoggingService(console_output=True)\n\n result = logger.log(LogEntry(LogLevel.ERROR, __name__, error_message))\n logger.log(LogEntry(LogLevel.WARN, __name__, error_message))\n logger.log(LogEntry(LogLevel.INFO, __name__, error_message))\n logger.log(LogEntry(LogLevel.DEBUG, __name__, error_message))\n\n assert result.message == error_message", "def to(cls, database=\"AUDIT\", collection=\"log\",\n mongodb_uri=\"mongodb://localhost:27017\", level=logging.NOTSET):\n return cls(mongodb_uri, database, collection, level)", "def dump_to_log(self):\n # self._send_request(\"/dumpToLog\")\n pass", "def audit(self):\n self.ping()", "def db_for_write(self, model, **hints):\n if model == FilterRecordingTracking:\n return 'db_rest_api'\n return None", "async def addAudit(self, name, description, status, type, data, userid) -> CreateAuditResponse:\n return await self.stub.CreateAudit(\n CreateAuditRequest(name=name,\n description=description, type=type, status=status, data=data, created_by=userid\n ))", "async def audit_actions(self, ctx: Context) -> None:\n\n if ctx.invoked_subcommand is None:\n await ctx.send_help('auditaction')", "def electrolytedb():\n if not check_for_mongodb():\n pytest.skip(\"MongoDB is required\")", "def setup_logging():\n log.setup('keystone')", "def dwl_auditlog_entry_report(session):\n url = session.get_url('audit', 'dwl')\n\n req = re.Request('GET', url)\n\n return session.send_recv(req, 'Audit log entry report downloaded.')", "def logQuote(cmdDict):\n\n try:\n Database.insert(TRANSACT_COLLECT, cmdDict)\n\n except pymongo.errors.PyMongoError as err:\n print(f\"ERROR! Could not log command into database. Failed with: {err}\")", "def cf_log_analytics_data_plane(cli_ctx, _):\n from azure.loganalytics import LogAnalyticsDataClient\n from azure.cli.core._profile import Profile\n profile = Profile(cli_ctx=cli_ctx)\n cred, _, _ = profile.get_login_credentials(\n resource=cli_ctx.cloud.endpoints.log_analytics_resource_id)\n api_version = 'v1'\n return LogAnalyticsDataClient(cred,\n base_url=cli_ctx.cloud.endpoints.log_analytics_resource_id + '/' + api_version)", "def test_disabled(self, monkeypatch):\n monkeypatch.setenv('ENABLE_AUTO_EXPIRE', 'true')\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('user2', 'ldasfkk', 'Inactive', created, last_used)\n key.audit(10, 11, 10, 8)\n assert key.audit_state == 'disabled'\n key.audit(60, 80, 30, 20)\n assert key.audit_state == 'disabled'", "def test_aud_from_log_ignores_index():\n assert True", "def main():\n\n args = cli.get_args()\n\n audit = {}\n try:\n service_instance,content = connect_vc(args.host,args.user,args.password,args.port)\n\n if sys.stdout.isatty():\n print(\"vCenter: %s\" % args.host)\n \n content = service_instance.RetrieveContent()\n\n container = content.rootFolder # starting point to look into\n datacenters = get_datacenters(content)\n for dc in datacenters:\n datacenters[dc]['clusters'] = get_clusters(datacenters[dc]['dc'])\n\n datacenters[dc]['vms'] = get_vms(datacenters[dc]['dc'].vmFolder)\n \n get_nets(dc)\n get_dstores(dc)\n\n vmcount=0\n \n for dc in datacenters:\n for vm in sorted(datacenters[dc]['vms'],key=lambda s: s.lower()):\n vmcount+=1\n v = datacenters[dc]['vms'][vm]\n c = find_cluster(datacenters[dc]['clusters'],v.runtime.host.name)\n vort = \"Template\" if v.summary.config.template == True else \"VM\"\n audit[v.name]={}\n 
audit[v.name]['datacenter'] = dc\n audit[v.name]['cluster'] = c\n audit[v.name]['type'] = vort\n audit[v.name]['hostname'] = v.summary.guest.hostName\n audit[v.name]['guestid'] = v.config.guestId\n audit[v.name]['fullname'] = v.summary.config.guestFullName\n audit[v.name]['state'] = v.runtime.powerState\n audit[v.name]['ip'] = v.guest.ipAddress\n if sys.stdout.isatty():\n print(vmcount,\"Guests processed\",end='\\r')\n sys.stdout.flush()\n# print(\"%-15s:%-10s %-8s %-30s %-30s %s %s %s %s\" % (dc, c, vort,v.name,v.summary.guest.hostName, v.config.guestId, v.summary.config.guestFullName,v.guest.guestState,v.guest.ipAddress ))\n #print vort, v.name, v.summary.guest.hostName, v.guest.guestId, v.summary.config.guestFullName,v.guest.guestState,v.guest.ipAddress #,v.summary\n# print(\"\\ncount:\",vmcount)\n \n print(json.dumps(audit, indent=4, separators=(',', ': ')))\n \n except vmodl.MethodFault as error:\n print(\"Caught vmodl fault : \" + error.msg)\n return -1\n\n return 0", "def get_logdb_quota():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<show><system><logdb-quota></logdb-quota></system></show>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def test_mongodb_origin_simple_with_BSONBinary(sdc_builder, sdc_executor, mongodb):\n\n ORIG_BINARY_DOCS = [\n {'data': binary.Binary(b'Binary Data Flute')},\n {'data': binary.Binary(b'Binary Data Oboe')},\n {'data': binary.Binary(b'Binary Data Violin')}\n ]\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n pipeline_builder.add_error_stage('Discard')\n\n mongodb_origin = pipeline_builder.add_stage('MongoDB', type='origin')\n mongodb_origin.set_attributes(capped_collection=False,\n database=get_random_string(ascii_letters, 5),\n collection=get_random_string(ascii_letters, 10))\n\n trash = pipeline_builder.add_stage('Trash')\n mongodb_origin >> trash\n pipeline = pipeline_builder.build().configure_for_environment(mongodb)\n\n try:\n # MongoDB and PyMongo add '_id' to the dictionary entries e.g. docs_in_database\n # when used for inserting in collection. Hence the deep copy.\n docs_in_database = copy.deepcopy(ORIG_BINARY_DOCS)\n\n # Create documents in MongoDB using PyMongo.\n # First a database is created. 
Then a collection is created inside that database.\n # Then documents are created in that collection.\n logger.info('Adding documents into %s collection using PyMongo...', mongodb_origin.collection)\n mongodb_database = mongodb.engine[mongodb_origin.database]\n mongodb_collection = mongodb_database[mongodb_origin.collection]\n insert_list = [mongodb_collection.insert_one(doc) for doc in docs_in_database]\n assert len(insert_list) == len(docs_in_database)\n\n # Start pipeline and verify the documents using snaphot.\n sdc_executor.add_pipeline(pipeline)\n snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot\n sdc_executor.stop_pipeline(pipeline)\n rows_from_snapshot = [{'data': str(record.value2['data'])} for record in snapshot[mongodb_origin].output]\n\n assert rows_from_snapshot == [{'data': str(record.get('data'))} for record in ORIG_BINARY_DOCS]\n\n finally:\n logger.info('Dropping %s database...', mongodb_origin.database)\n mongodb.engine.drop_database(mongodb_origin.database)", "def add_log(self):\n self.stack = []\n diff = self.diff(self.original, self.doc)\n entry = {\"_id\": utils.get_iuid(),\n \"doctype\": constants.DOCTYPE_LOG,\n \"docid\": self.doc[\"_id\"],\n \"diff\": diff,\n \"timestamp\": utils.get_time()}\n self.modify_log_entry(entry)\n if hasattr(flask.g, \"current_user\") and flask.g.current_user:\n entry[\"username\"] = flask.g.current_user[\"username\"]\n else:\n entry[\"username\"] = None\n if flask.has_request_context():\n entry[\"remote_addr\"] = str(flask.request.remote_addr)\n entry[\"user_agent\"] = str(flask.request.user_agent)\n else:\n entry[\"remote_addr\"] = None\n entry[\"user_agent\"] = os.path.basename(sys.argv[0])\n flask.g.db.put(entry)", "def docker_logs():\n local('docker logs {} -f'.format(project_name))", "async def view_audit_actions(self, ctx: Context) -> None:\n\n assert ctx.guild is not None # handle by `cog_check`\n\n if logging_info := (await typed_retrieve_query(\n self.bot.database,\n int,\n 'SELECT BITS FROM LOGGING WHERE GUILD_ID=?',\n (ctx.guild.id,))\n ):\n await ctx.send(embed=build_actions_embed(LoggingActions.all_enabled_actions((logging_info[0]))))\n else:\n await ctx.send('You must first set an audit channel before viewing audit actions.'\n '\\n_See `auditactions setchannel` for more information._')", "def log_image(data_category, image_name, path=None, plot=None, **kwargs):\n # always, just print in logs\n log(logging.INFO, data_category, \"AML MetricImage({})\".format(image_name))\n if data_category == DataCategory.ONLY_PUBLIC_DATA:\n # if public, ask azureml to record (if azureml attached)\n run = AmlRunWrapper()\n run.setup(attach=True)\n run.log_image(image_name, path, plot, **kwargs)\n run.flush()", "def print_aldb(service):\n # For now this sends logs to the log file.\n # Future direction is to create an INSTEON control panel.\n entity_id = service.data[CONF_ENTITY_ID]\n signal = f\"{entity_id}_{SIGNAL_PRINT_ALDB}\"\n dispatcher_send(hass, signal)", "def install_mongo ( vpc_conn, ec2_conn, cloudwatch_conn, vpc, base_name, aws_account_type, base_topicarn, params ):\n app_type = 'MONGO'\n mongo_keypair = get_keypair_name( aws_account_type, vpc.region.name, app_type )\n print mongo_keypair\n mongo_sec_grp_name = get_secgrp_name( base_name, app_type )\n \n print \"mongo_sec_grp_name\" + mongo_sec_grp_name\n \n mongo_sec_grp = find_secgrp(ec2_conn, mongo_sec_grp_name)\n print mongo_sec_grp\n \n if not mongo_sec_grp :\n mongo_sec_grp = create_secgrp( ec2_conn,\n vpc,\n mongo_sec_grp_name,\n 
'Controls access to the ' + app_type )\n print mongo_sec_grp \n \n remove_all_rules( ec2_conn, [ mongo_sec_grp ],deep = True,base_name = base_name)\n grant_ssh_access( ec2_conn, [ mongo_sec_grp ], find_group( ec2_conn, base_name, 'NAT' ) )\n grant_grp_self_access ( ec2_conn, mongo_sec_grp, 0, 65535, protocol = 'tcp' )\n \n mongo_ami_name = base_name + '-' + app_type\n \n mongo_ami = get_ami_by_name( ec2_conn, mongo_ami_name )\n if not mongo_ami :\n mongo_ami = get_ami_by_name( ec2_conn, default_mongo_ami )\n\n print \"Creating mongoDB Instances\"\n \n mongo_subnets = get_vpc_subnets( vpc_conn, vpc, 'STATIC' )\n mongo_sec_grps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ mongo_sec_grp_name ] } )\n \n primary_ip = get_primary_node(mongo_subnets)\n mongo_instances = []\n mongo_config = get_mongo_config(mongo_subnets)\n \n user_data = None\n primary_instance_id = None\n primary_subnet = None\n for subnet in mongo_subnets :\n cidr_block = subnet.cidr_block\n \n if cidr_block == primary_ip :\n primary_subnet = subnet\n \n # First launch the secondary instances\n if cidr_block != primary_ip :\n instance_private_ip = get_static_ip(subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo )\n\n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )\n\n user_data = base64.b64encode(mongo_config)\n \n print \"Launching primary mongo instance\"\n print \"primary_subnet \" +primary_subnet.cidr_block\n instance_private_ip = get_static_ip(primary_subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = primary_subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo)\n \n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )", "def create_sync_entry(ts, coll, idx):\n sync_log = connection.ElasticLogs()\n sync_log.ts = ts\n sync_log.coll = unicode(coll)\n sync_log.idx = unicode(idx)\n sync_log.save()\n return True", "def log_event(event):\n LOGGER.info(\"====================================================\")\n LOGGER.info(event)\n LOGGER.info(\"====================================================\")", "def enable_access_logging(self) -> Optional[bool]:\n return pulumi.get(self, \"enable_access_logging\")", "def modify_audit_log_filter_with_options(\n self,\n request: dds_20151201_models.ModifyAuditLogFilterRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ModifyAuditLogFilterResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.filter):\n query['Filter'] = request.filter\n if not 
UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_type):\n query['RoleType'] = request.role_type\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ModifyAuditLogFilter',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ModifyAuditLogFilterResponse(),\n self.call_api(params, req, runtime)\n )", "def __init__(self) -> None:\n name = \"Ensure that RDS Cluster audit logging is enabled for MySQL engine\"\n id = \"CKV_AWS_325\"\n supported_resources = (\"aws_rds_cluster\",)\n categories = (CheckCategories.LOGGING,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)", "def test_rotate_access(self):\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('username', 'keyid', 'Active', created, last_used)\n key.audit(60, 80, 20, 10)\n assert key.audit_state == 'stagnant'", "def test_read_namespaced_deployment_log_log(self):\n pass", "def test_resourcelog(client, test_database, session):\n test_database.refresh()\n\n usersession_id = 1\n remote_addr = \"127.0.0.1\"\n\n # Without payload\n r = client.get(\"/api/v1/config/\")\n statuscode = r.status_code\n response_size = int(r.headers.get(\"Content-Length\"))\n\n rlogs = session.query(log.ResourceLog).all()\n assert len(rlogs) == 2 # 2 entries since API did a login as first entry\n\n rl = rlogs[-1]\n assert rl.remote_addr == remote_addr\n assert rl.usersession_id == usersession_id\n assert rl.method == \"GET\"\n assert rl.resource == \"/api/v1/config/\"\n assert rl.statuscode == statuscode\n assert rl.response_size == response_size\n assert rl.payload is None\n assert rl.payload_size == 0\n assert rl.query == \"\"\n assert rl.duration > 0\n assert isinstance(rl.time, datetime.datetime)\n\n # With payload\n\n payload_data = {\n \"allele_ids\": [1],\n \"gp_name\": \"HBOCUTV\",\n \"gp_version\": \"v01\",\n \"referenceassessments\": [],\n }\n r = client.post(\"/api/v1/acmg/alleles/?dummy=data\", payload_data)\n payload = json.dumps(payload_data)\n payload_size = len(payload)\n statuscode = r.status_code\n response_size = int(r.headers.get(\"Content-Length\"))\n\n rlogs = session.query(log.ResourceLog).all()\n assert len(rlogs) == 4 # 4 since /currentuser is called to check whether logged in\n\n rl = rlogs[-1]\n assert statuscode == 200\n assert rl.remote_addr == remote_addr\n assert rl.usersession_id == usersession_id\n assert rl.method == \"POST\"\n assert rl.resource == \"/api/v1/acmg/alleles/\"\n assert rl.statuscode == statuscode\n assert rl.response_size == response_size\n assert rl.payload == payload\n assert rl.payload_size == payload_size\n assert rl.query == \"dummy=data\"\n assert rl.duration > 0\n assert isinstance(rl.time, datetime.datetime)\n\n # Make 
sure /login doesn't log passwords\n payload_data = {\"username\": \"abc\", \"password\": \"123\"}\n r = client.post(\"/api/v1/users/actions/login/\", payload_data)\n statuscode = r.status_code\n response_size = int(r.headers.get(\"Content-Length\"))\n\n rlogs = session.query(log.ResourceLog).all()\n assert len(rlogs) == 6 # 6 since /currentuser is called to check whether logged in\n\n rl = rlogs[-1]\n assert statuscode == 401 # User doesn't exist\n assert rl.remote_addr == remote_addr\n assert rl.usersession_id == usersession_id\n assert rl.method == \"POST\"\n assert rl.resource == \"/api/v1/users/actions/login/\"\n assert rl.statuscode == statuscode\n assert rl.response_size == response_size\n assert rl.payload is None\n assert rl.payload_size == 0\n assert rl.query == \"\"\n assert rl.duration > 0\n assert isinstance(rl.time, datetime.datetime)\n\n # Test logging when not logged in\n payload_data = {\n \"allele_ids\": [1],\n \"gp_name\": \"HBOCUTV\",\n \"gp_version\": \"v01\",\n \"referenceassessments\": [],\n }\n client.logout()\n r = client.post(\"/api/v1/acmg/alleles/?dummy=data\", payload_data, username=None)\n payload = json.dumps(payload_data)\n payload_size = len(payload)\n statuscode = r.status_code\n response_size = int(r.headers.get(\"Content-Length\"))\n\n rlogs = session.query(log.ResourceLog).all()\n assert len(rlogs) == 9 # logout counts as 1\n\n rl = rlogs[-1]\n assert statuscode == 403\n assert rl.remote_addr == remote_addr\n assert rl.usersession_id is None\n assert rl.method == \"POST\"\n assert rl.resource == \"/api/v1/acmg/alleles/\"\n assert rl.statuscode == statuscode\n assert rl.response_size == response_size\n assert rl.payload == payload\n assert rl.payload_size == payload_size\n assert rl.query == \"dummy=data\"\n assert isinstance(rl.time, datetime.datetime)", "def upgrade_to_21():\n\n def update_project_template(template):\n new_template = {'acquisitions': []}\n for a in template.get('acquisitions', []):\n new_a = {'minimum': a['minimum']}\n properties = a['schema']['properties']\n if 'measurement' in properties:\n m_req = properties['measurement']['pattern']\n m_req = re.sub('^\\(\\?i\\)', '', m_req)\n new_a['files']=[{'measurement': m_req, 'minimum': 1}]\n if 'label' in properties:\n l_req = properties['label']['pattern']\n l_req = re.sub('^\\(\\?i\\)', '', l_req)\n new_a['label'] = l_req\n new_template['acquisitions'].append(new_a)\n\n return new_template\n\n def dm_v2_updates(cont_list, cont_name):\n for container in cont_list:\n\n query = {'_id': container['_id']}\n update = {'$rename': {'metadata': 'info'}}\n\n if cont_name == 'projects' and container.get('template'):\n new_template = update_project_template(json.loads(container.get('template')))\n update['$set'] = {'template': new_template}\n\n\n if cont_name == 'sessions':\n update['$rename'].update({'subject.metadata': 'subject.info'})\n\n\n measurement = None\n modality = None\n info = None\n if cont_name == 'acquisitions':\n update['$unset'] = {'instrument': '', 'measurement': ''}\n measurement = container.get('measurement', None)\n modality = container.get('instrument', None)\n info = container.get('metadata', None)\n if info:\n config.db.acquisitions.update_one(query, {'$set': {'metadata': {}}})\n\n\n # From mongo docs: '$rename does not work if these fields are in array elements.'\n files = container.get('files')\n if files is not None:\n updated_files = []\n for file_ in files:\n file_['info'] = {}\n if 'metadata' in file_:\n file_['info'] = file_.pop('metadata', None)\n if 'instrument' in 
file_:\n file_['modality'] = file_.pop('instrument', None)\n if measurement:\n # Move the acquisition's measurement to all files\n if file_.get('measurements'):\n file_['measurements'].append(measurement)\n else:\n file_['measurements'] = [measurement]\n if info and file_.get('type', '') == 'dicom':\n # This is going to be the dicom header info\n updated_info = info\n updated_info.update(file_['info'])\n file_['info'] = updated_info\n if modality and not file_.get('modality'):\n file_['modality'] = modality\n\n updated_files.append(file_)\n if update.get('$set'):\n update['$set']['files'] = updated_files\n else:\n update['$set'] = {'files': updated_files}\n\n result = config.db[cont_name].update_one(query, update)\n\n query = {'$or':[{'files.metadata': { '$exists': True}},\n {'metadata': { '$exists': True}},\n {'files.instrument': { '$exists': True}}]}\n\n dm_v2_updates(config.db.collections.find(query), 'collections')\n\n query['$or'].append({'template': { '$exists': True}})\n dm_v2_updates(config.db.projects.find({}), 'projects')\n\n query['$or'].append({'subject': { '$exists': True}})\n dm_v2_updates(config.db.sessions.find(query), 'sessions')\n\n query['$or'].append({'instrument': { '$exists': True}})\n query['$or'].append({'measurement': { '$exists': True}})\n dm_v2_updates(config.db.acquisitions.find(query), 'acquisitions')", "def allow_map_to_audit(self):\n return self.audit_id is None and self.audit is None", "def test_get(self, init_db, audit):\n assert Audit.get(audit.id) == audit", "def search_log(doc_type, query):\n if query:\n body = {\n \"from\": 0,\n \"size\": 50,\n \"sort\": [\n {\n \"created_at\": {\n \"order\": \"desc\"\n }\n }\n ],\n \"query\": {\n \"term\": {\n \"_all\": \"the\"\n }\n }\n }\n else:\n body = {\n \"from\": 0,\n \"size\": 50,\n \"sort\": [\n {\n \"created_at\": {\n \"order\": \"desc\"\n }\n }\n ]\n }\n if doc_type:\n print \"condition 1 true\"\n res = es.search(index=\"logs\", doc_type=str(doc_type).strip(), body=body)\n else:\n res = es.search(index=\"logs\", body=body)\n\n data = []\n if not res.get('timed_out'):\n for item in res[\"hits\"][\"hits\"]:\n data.append({\n 'client_ip': item['_source'].get('client_ip'),\n 'client': item['_source'].get('client'),\n 'log': item['_source'].get('log'),\n 'service': item['_source'].get('service'),\n })\n response = {\"data\": data}\n return response", "def test_log_custom_logger(caplog, test_df):\n caplog.clear()\n\n logger_name = \"my_custom_logger\"\n\n my_logger = logging.getLogger(logger_name)\n\n @log_step(print_fn=my_logger.info)\n def do_nothing(df, *args, **kwargs):\n return df\n\n with caplog.at_level(logging.INFO):\n test_df.pipe(do_nothing)\n\n assert logger_name in caplog.text", "def test_Drive_log_count(opencl_env: cldrive_env.OpenCLEnvironment):\n logs = opencl_kernel_driver.Drive(\n \"\"\"\nkernel void A(global int* a, global int* b) {\n a[get_global_id(0)] += b[get_global_id(0)];\n}\n\"\"\",\n 16,\n 16,\n opencl_env,\n 5,\n )\n assert len(logs) == 5", "async def modlog(self, ctx):\r\n if ctx.invoked_subcommand is None:\r\n entity=self.bot.get_command(\"modlog\")\r\n p = await HelpPaginator.from_command(ctx, entity)\r\n await p.paginate()\r\n server = ctx.guild\r\n if str(server.id) not in self._logs:\r\n self._logs[str(server.id)] = {}\r\n dataIO.save_json(self._logs_file, self._logs)\r\n if \"channel\" not in self._logs[str(server.id)]:\r\n self._logs[str(server.id)][\"channel\"] = None\r\n dataIO.save_json(self._logs_file, self._logs)\r\n if \"toggle\" not in self._logs[str(server.id)]:\r\n 
self._logs[str(server.id)][\"toggle\"] = False\r\n dataIO.save_json(self._logs_file, self._logs)\r\n if \"case#\" not in self._logs[str(server.id)]:\r\n self._logs[str(server.id)][\"case#\"] = 0\r\n dataIO.save_json(self._logs_file, self._logs)\r\n if \"case\" not in self._logs[str(server.id)]:\r\n self._logs[str(server.id)][\"case\"] = {}\r\n dataIO.save_json(self._logs_file, self._logs)\r\n else:\r\n server = ctx.guild\r\n if str(server.id) not in self._logs:\r\n self._logs[str(server.id)] = {}\r\n dataIO.save_json(self._logs_file, self._logs)\r\n if \"channel\" not in self._logs[str(server.id)]:\r\n self._logs[str(server.id)][\"channel\"] = None\r\n dataIO.save_json(self._logs_file, self._logs)\r\n if \"toggle\" not in self._logs[str(server.id)]:\r\n self._logs[str(server.id)][\"toggle\"] = False\r\n dataIO.save_json(self._logs_file, self._logs)\r\n if \"case#\" not in self._logs[str(server.id)]:\r\n self._logs[str(server.id)][\"case#\"] = 0\r\n dataIO.save_json(self._logs_file, self._logs)\r\n if \"case\" not in self._logs[str(server.id)]:\r\n self._logs[str(server.id)][\"case\"] = {}\r\n dataIO.save_json(self._logs_file, self._logs)", "def on_a(self):\r\n self.log()", "def add_audit_cols(df, changedt):\n df = df.withColumn(\"operation\", f.lit(\"I\")) \\\n .withColumn(\"processeddate\", f.current_timestamp().cast(\"String\")) \\\n .withColumn(\"changedate\", f.lit(changedt)) \\\n .withColumn('changedate_year', f.year('changedate').cast(\"String\")) \\\n .withColumn('changedate_month', f.month('changedate').cast(\"String\")) \\\n .withColumn('changedate_day', f.dayofmonth('changedate').cast(\"String\"))\n return df", "def record(params, git_info = {}):\n print \"recording...\"\n\n try:\n # connect to MongoDB\n # config = json.load(open(os.environ.get('HOME') + \"/sandbox/config.json\"))\n config = json.load(open(os.environ.get('HOME') + \"/LSEMS/config.json\"))\n try:\n client = MongoClient(config[\"mongodb_url\"])\n except Exception as e:\n raise Exception(\"fail to connect to given MongoDB address: \" + DB_addr)\n\n # check and run the thing\n missing = checkKeys(params, ['data_set', 'src', 'type', 'param'])\n if len(missing) != 0:\n raise Exception(\"missing attribute\"+('s' if len(missing)!=1 else '')+\": \"+str(missing))\n\n params['time'] = asctime()\n params['commit_id'] = git_info['commit_id']\n params['name'] = git_info['name']\n repo_name = git_info['repo_name']\n params['repo_name'] = repo_name\n user = verifyUser(client, git_info['name'])\n\n exp = user.find_one({'exp_name': repo_name})\n if not exp:\n print 'adding new experiment '+repo_name+'...'\n user.insert({'exp_name': repo_name, 'exp_records':[]})\n old_records = user.find_one({'exp_name': repo_name})['exp_records']\n user.update({'exp_name': repo_name}, {'$set': {'exp_records': old_records + [params]}})\n\n print params\n #user.insert(params)\n client.close()\n return True,params\n except Exception as e:\n print e\n print \"Aborting...\"\n return False,{}", "def setup_logfile():\r\n from core.general.appinit import log_init\r\n log_init(\r\n 'general',\r\n 'django_api'\r\n )", "def command(ctx):\n ctx.setup_logger(format='')", "def update_audit_info(progress_controller=None):\n if progress_controller is None:\n progress_controller = ProgressControllerBase()\n progress_controller.maximum = 2\n\n from stalker.db.session import DBSession\n from stalker import LocalSession\n\n with DBSession.no_autoflush:\n local_session = LocalSession()\n logged_in_user = local_session.logged_in_user\n 
progress_controller.increment()\n\n if logged_in_user:\n # update the version updated_by\n from anima.dcc import mayaEnv\n\n m_env = mayaEnv.Maya()\n v = m_env.get_current_version()\n if v:\n v.updated_by = logged_in_user\n\n from stalker.db.session import DBSession\n\n DBSession.commit()\n progress_controller.increment()\n progress_controller.complete()", "def test_sudoers_audit(host):\n with host.sudo():\n sudoers_access = host.run(\"touch /etc/sudoers\")\n audit_log = host.run(\"journalctl -u auditd --since \\\"10 seconds ago\\\" | grep \\\"/etc/sudoers\\\"\")\n assert audit_log.stdout", "def sync_volumeaccessright_record( vac ):\n \n syndicate_caps = \"UNKNOWN\" # for exception handling\n \n # get arguments\n config = observer_core.get_config()\n principal_id = vac.owner_id.email\n volume_name = vac.volume.name\n syndicate_caps = observer_core.opencloud_caps_to_syndicate_caps( vac.cap_read_data, vac.cap_write_data, vac.cap_host_data ) \n \n logger.info( \"Sync VolumeAccessRight for (%s, %s)\" % (principal_id, volume_name) )\n \n # validate config\n try:\n observer_secret = observer_core.get_syndicate_observer_secret( config.SYNDICATE_OBSERVER_SECRET )\n except Exception, e:\n traceback.print_exc()\n logger.error(\"syndicatelib config is missing SYNDICATE_RG_DEFAULT_PORT, SYNDICATE_OBSERVER_SECRET\")\n raise e\n \n # ensure the user exists and has credentials\n try:\n rc, user = observer_core.ensure_principal_exists( principal_id, observer_secret, is_admin=False, max_UGs=1100, max_RGs=1 )\n assert rc is True, \"Failed to ensure principal %s exists (rc = %s,%s)\" % (principal_id, rc, user)\n except Exception, e:\n traceback.print_exc()\n logger.error(\"Failed to ensure user '%s' exists\" % principal_id )\n raise e\n\n # grant the slice-owning user the ability to provision UGs in this Volume\n try:\n rc = observer_core.ensure_volume_access_right_exists( principal_id, volume_name, syndicate_caps )\n assert rc is True, \"Failed to set up Volume access right for slice %s in %s\" % (principal_id, volume_name)\n \n except Exception, e:\n traceback.print_exc()\n logger.error(\"Failed to set up Volume access right for slice %s in %s\" % (principal_id, volume_name))\n raise e\n \n except Exception, e:\n traceback.print_exc()\n logger.error(\"Faoed to ensure user %s can access Volume %s with rights %s\" % (principal_id, volume_name, syndicate_caps))\n raise e\n\n return True", "def test_log_extra_custom_logger(caplog, test_df):\n caplog.clear()\n\n logger_name = \"my_custom_logger\"\n\n my_logger = logging.getLogger(logger_name)\n\n @log_step_extra(len, print_fn=my_logger.info)\n def do_nothing(df, *args, **kwargs):\n return df\n\n with caplog.at_level(logging.INFO):\n test_df.pipe(do_nothing)\n\n assert logger_name in caplog.text", "def log_all(self):\n self.save_raw()\n self.log()", "def __init__(__self__, *,\n copy_log_details_type: str,\n disk_serial_number: str,\n error_log_link: str,\n verbose_log_link: str):\n pulumi.set(__self__, \"copy_log_details_type\", 'DataBoxDisk')\n pulumi.set(__self__, \"disk_serial_number\", disk_serial_number)\n pulumi.set(__self__, \"error_log_link\", error_log_link)\n pulumi.set(__self__, \"verbose_log_link\", verbose_log_link)", "def upgrade_to_15():\n query = {}\n query['$or'] = [\n {'timestamp':''},\n {'$and': [\n {'timestamp': {'$exists': True}},\n {'timestamp': {'$not': {'$type':2}}},\n {'timestamp': {'$not': {'$type':9}}}\n ]}\n ]\n unset = {'$unset': {'timestamp': ''}}\n\n config.db.sessions.update_many(query, unset)\n 
config.db.acquisitions.update_many(query, unset)\n\n query = {'$and': [\n {'timestamp': {'$exists': True}},\n {'timestamp': {'$type':2}}\n ]}\n sessions = config.db.sessions.find(query)\n for s in sessions:\n try:\n fixed_timestamp = dateutil.parser.parse(s['timestamp'])\n except:\n config.db.sessions.update_one({'_id': s['_id']}, {'$unset': {'timestamp': ''}})\n continue\n config.db.sessions.update_one({'_id': s['_id']}, {'$set': {'timestamp': fixed_timestamp}})\n\n acquisitions = config.db.acquisitions.find(query)\n for a in acquisitions:\n try:\n fixed_timestamp = dateutil.parser.parse(a['timestamp'])\n except:\n config.db.sessions.update_one({'_id': a['_id']}, {'$unset': {'timestamp': ''}})\n continue\n config.db.sessions.update_one({'_id': a['_id']}, {'$set': {'timestamp': fixed_timestamp}})", "def clb_access_logging_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:\n elb = session.client(\"elb\")\n # ISO Time\n iso8601Time = (datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat())\n for lb in describe_clbs(cache, session):\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(lb,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n clbName = lb[\"LoadBalancerName\"]\n clbArn = f\"arn:{awsPartition}:elasticloadbalancing:{awsRegion}:{awsAccountId}:loadbalancer/{clbName}\"\n dnsName = lb[\"DNSName\"]\n lbSgs = lb[\"SecurityGroups\"]\n lbSubnets = lb[\"Subnets\"]\n lbAzs = lb[\"AvailabilityZones\"]\n lbVpc = lb[\"VPCId\"]\n clbScheme = lb[\"Scheme\"]\n # Get Attrs\n if elb.describe_load_balancer_attributes(LoadBalancerName=clbName)[\"LoadBalancerAttributes\"][\"AccessLog\"][\"Enabled\"] is False:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": clbArn + \"/classic-loadbalancer-access-logging-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": clbArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks/AWS Security Best Practices\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"MEDIUM\"},\n \"Confidence\": 99,\n \"Title\": \"[ELB.5] Classic load balancers should enable access logging\",\n \"Description\": \"Classic load balancer \"\n + clbName\n + \" does not have access logging enabled. 
Refer to the remediation instructions to remediate this behavior\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on access logging refer to the Access Logs for Your Classic Load Balancer section of the Classic Load Balancers User Guide.\",\n \"Url\": \"https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/access-log-collection.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Networking\",\n \"AssetService\": \"AWS Elastic Load Balancer\",\n \"AssetComponent\": \"Classic Load Balancer\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsElbLoadBalancer\",\n \"Id\": clbArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsElbLoadBalancer\": {\n \"DnsName\": dnsName,\n \"Scheme\": clbScheme,\n \"SecurityGroups\": lbSgs,\n \"Subnets\": lbSubnets,\n \"VpcId\": lbVpc,\n \"AvailabilityZones\": lbAzs,\n \"LoadBalancerName\": clbName\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 ID.AM-3\",\n \"NIST CSF V1.1 DE.AE-1\",\n \"NIST CSF V1.1 DE.AE-3\",\n \"NIST CSF V1.1 DE.CM-1\",\n \"NIST CSF V1.1 DE.CM-7\",\n \"NIST CSF V1.1 PR.PT-1\",\n \"NIST SP 800-53 Rev. 4 AC-2\",\n \"NIST SP 800-53 Rev. 4 AC-4\",\n \"NIST SP 800-53 Rev. 4 AU-6\",\n \"NIST SP 800-53 Rev. 4 AU-12\",\n \"NIST SP 800-53 Rev. 4 CA-3\",\n \"NIST SP 800-53 Rev. 4 CA-7\",\n \"NIST SP 800-53 Rev. 4 CA-9\",\n \"NIST SP 800-53 Rev. 4 CM-2\",\n \"NIST SP 800-53 Rev. 4 CM-3\",\n \"NIST SP 800-53 Rev. 4 CM-8\",\n \"NIST SP 800-53 Rev. 4 IR-4\",\n \"NIST SP 800-53 Rev. 4 IR-5\",\n \"NIST SP 800-53 Rev. 4 IR-8\",\n \"NIST SP 800-53 Rev. 4 PE-3\",\n \"NIST SP 800-53 Rev. 4 PE-6\",\n \"NIST SP 800-53 Rev. 4 PE-20\",\n \"NIST SP 800-53 Rev. 4 PL-8\",\n \"NIST SP 800-53 Rev. 4 SC-5\",\n \"NIST SP 800-53 Rev. 4 SC-7\",\n \"NIST SP 800-53 Rev. 
4 SI-4\",\n \"AICPA TSC CC3.2\",\n \"AICPA TSC CC6.1\",\n \"AICPA TSC CC7.2\",\n \"ISO 27001:2013 A.12.1.1\",\n \"ISO 27001:2013 A.12.1.2\",\n \"ISO 27001:2013 A.12.4.1\",\n \"ISO 27001:2013 A.12.4.2\",\n \"ISO 27001:2013 A.12.4.3\",\n \"ISO 27001:2013 A.12.4.4\",\n \"ISO 27001:2013 A.12.7.1\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.2.1\",\n \"ISO 27001:2013 A.13.2.2\",\n \"ISO 27001:2013 A.14.2.7\",\n \"ISO 27001:2013 A.15.2.1\",\n \"ISO 27001:2013 A.16.1.7\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": clbArn + \"/classic-loadbalancer-access-logging-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": clbArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks/AWS Security Best Practices\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[ELB.5] Classic load balancers should enable access logging\",\n \"Description\": \"Classic load balancer \"\n + clbName\n + \" does not have access logging enabled.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on access logging refer to the Access Logs for Your Classic Load Balancer section of the Classic Load Balancers User Guide.\",\n \"Url\": \"https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/access-log-collection.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Networking\",\n \"AssetService\": \"AWS Elastic Load Balancer\",\n \"AssetComponent\": \"Classic Load Balancer\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsElbLoadBalancer\",\n \"Id\": clbArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsElbLoadBalancer\": {\n \"DnsName\": dnsName,\n \"Scheme\": clbScheme,\n \"SecurityGroups\": lbSgs,\n \"Subnets\": lbSubnets,\n \"VpcId\": lbVpc,\n \"AvailabilityZones\": lbAzs,\n \"LoadBalancerName\": clbName\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 ID.AM-3\",\n \"NIST CSF V1.1 DE.AE-1\",\n \"NIST CSF V1.1 DE.AE-3\",\n \"NIST CSF V1.1 DE.CM-1\",\n \"NIST CSF V1.1 DE.CM-7\",\n \"NIST CSF V1.1 PR.PT-1\",\n \"NIST SP 800-53 Rev. 4 AC-2\",\n \"NIST SP 800-53 Rev. 4 AC-4\",\n \"NIST SP 800-53 Rev. 4 AU-6\",\n \"NIST SP 800-53 Rev. 4 AU-12\",\n \"NIST SP 800-53 Rev. 4 CA-3\",\n \"NIST SP 800-53 Rev. 4 CA-7\",\n \"NIST SP 800-53 Rev. 4 CA-9\",\n \"NIST SP 800-53 Rev. 4 CM-2\",\n \"NIST SP 800-53 Rev. 4 CM-3\",\n \"NIST SP 800-53 Rev. 4 CM-8\",\n \"NIST SP 800-53 Rev. 4 IR-4\",\n \"NIST SP 800-53 Rev. 4 IR-5\",\n \"NIST SP 800-53 Rev. 4 IR-8\",\n \"NIST SP 800-53 Rev. 4 PE-3\",\n \"NIST SP 800-53 Rev. 4 PE-6\",\n \"NIST SP 800-53 Rev. 4 PE-20\",\n \"NIST SP 800-53 Rev. 4 PL-8\",\n \"NIST SP 800-53 Rev. 4 SC-5\",\n \"NIST SP 800-53 Rev. 4 SC-7\",\n \"NIST SP 800-53 Rev. 
4 SI-4\",\n \"AICPA TSC CC3.2\",\n \"AICPA TSC CC6.1\",\n \"AICPA TSC CC7.2\",\n \"ISO 27001:2013 A.12.1.1\",\n \"ISO 27001:2013 A.12.1.2\",\n \"ISO 27001:2013 A.12.4.1\",\n \"ISO 27001:2013 A.12.4.2\",\n \"ISO 27001:2013 A.12.4.3\",\n \"ISO 27001:2013 A.12.4.4\",\n \"ISO 27001:2013 A.12.7.1\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.2.1\",\n \"ISO 27001:2013 A.13.2.2\",\n \"ISO 27001:2013 A.14.2.7\",\n \"ISO 27001:2013 A.15.2.1\",\n \"ISO 27001:2013 A.16.1.7\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding", "def log_access():\n # todo use project prefix\n tail('/var/log/nginx/access.log')", "def test_dump_collection(self):\n\n test_oplog, primary_conn, search_ts = self.get_oplog_thread()\n solr = DocManager()\n test_oplog.doc_manager = solr\n\n #with documents\n primary_conn['test']['test'].insert({'name': 'paulie'})\n search_ts = test_oplog.get_last_oplog_timestamp()\n test_oplog.dump_collection()\n\n test_oplog.doc_manager.commit()\n solr_results = solr._search()\n self.assertEqual(len(solr_results), 1)\n solr_doc = solr_results[0]\n self.assertEqual(long_to_bson_ts(solr_doc['_ts']), search_ts)\n self.assertEqual(solr_doc['name'], 'paulie')\n self.assertEqual(solr_doc['ns'], 'test.test')", "def save_to_mongo(self, collection='pending_trailers'):\n Database.insert(collection=collection, data=self.json())", "def test_post_add_log_event(self):\n pass", "def enable_audit_logging(f):\n\n @wraps(f)\n def wrapper(*args, **kwargs):\n def create_audit_log_for_request_decorator(response):\n return create_audit_log_for_request(response)\n\n if is_audit_enabled():\n # we can't add the `after_this_request` and\n # `create_audit_log_for_request_decorator` decorators to the\n # functions directly, because `is_audit_enabled` depends on\n # the config being loaded\n flask.after_this_request(create_audit_log_for_request_decorator)\n return f(*args, **kwargs)\n\n return wrapper", "def test_enable_local_caching(sdc_builder, sdc_executor, stage_attributes, cluster):\n\n dir_path = os.path.join(tempfile.gettempdir(), get_random_string(string.ascii_lowercase))\n table_name = get_random_string(string.ascii_lowercase, 10)\n\n builder = sdc_builder.get_pipeline_builder()\n directory = builder.add_stage('Directory', type='origin')\n directory.data_format = 'JSON'\n directory.json_content = 'MULTIPLE_OBJECTS'\n directory.files_directory = dir_path\n directory.file_name_pattern = '*.json'\n directory.file_name_pattern_mode = 'GLOB'\n\n kudu = builder.add_stage('Kudu Lookup')\n kudu.kudu_table_name = f'impala::default.{table_name}'\n kudu.key_columns_mapping = [dict(field='/f1', columnName='id')]\n kudu.column_to_output_field_mapping = [dict(field='/d1', columnName='name', defaultValue='no_name')]\n kudu.missing_lookup_behavior = 'PASS_RECORD_ON'\n kudu.enable_table_caching = True\n kudu.eviction_policy_type = 'EXPIRE_AFTER_WRITE'\n kudu.expiration_time = 1\n kudu.time_unit = 'HOURS'\n kudu.set_attributes(**stage_attributes)\n\n wiretap = builder.add_wiretap()\n\n directory >> kudu >> wiretap.destination\n\n pipeline = builder.build().configure_for_environment(cluster)\n\n sdc_executor.add_pipeline(pipeline)\n\n metadata = sqlalchemy.MetaData()\n table = sqlalchemy.Table(table_name,\n metadata,\n sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),\n sqlalchemy.Column('name', sqlalchemy.String),\n impala_partition_by='HASH PARTITIONS 16',\n impala_stored_as='KUDU',\n impala_table_properties={\n 'kudu.master_addresses': 
f'{cluster.server_host}:{DEFAULT_KUDU_PORT}',\n 'kudu.num_tablet_replicas': '1'\n })\n\n engine = cluster.kudu.engine\n table.create(engine)\n\n try:\n sdc_executor.execute_shell(f'mkdir -p {dir_path}')\n sdc_executor.write_file(os.path.join(dir_path, 'a.json'), json.dumps({\"f1\": 1, \"d1\": \"old_name1\"}))\n\n conn = engine.connect()\n conn.execute(table.insert(), [{'id': 1, 'name': 'name1'}])\n\n status = sdc_executor.start_pipeline(pipeline)\n status.wait_for_pipeline_batch_count(2)\n\n conn.execute(table.update().where(table.c.id == 1).values(name='name2'))\n\n sdc_executor.write_file(os.path.join(dir_path, 'b.json'), json.dumps({\"f1\": 1, \"d1\": \"old_name2\"}))\n\n status.wait_for_pipeline_batch_count(4)\n\n output_records = [record.field for record in wiretap.output_records]\n\n if stage_attributes['enable_local_caching']:\n assert [{'f1': 1, 'd1': 'name1'}, {'f1': 1, 'd1': 'name1'}] == output_records\n else:\n assert [{'f1': 1, 'd1': 'name1'}, {'f1': 1, 'd1': 'name2'}] == output_records\n\n finally:\n try:\n sdc_executor.stop_pipeline(pipeline)\n finally:\n table.drop(engine)\n sdc_executor.execute_shell(f'rm -fr {dir_path}')", "def InsertLog():", "def load_landkreis_information():\n client = MongoClient(f'mongodb://{os.getenv(\"USR_\")}:{os.getenv(\"PWD_\")}@{os.getenv(\"REMOTE_HOST\")}:{os.getenv(\"REMOTE_PORT\")}/{os.getenv(\"AUTH_DB\")}')\n db = client[os.getenv(\"MAIN_DB\")]\n lk_collection = db[\"lk_overview\"]\n data = pd.DataFrame(list(lk_collection.find()))\n return data", "def test_last_used(self, monkeypatch):\n monkeypatch.setenv('ENABLE_AUTO_EXPIRE', 'true')\n monkeypatch.setenv('INACTIVITY_AGE', '10')\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('user3', 'kljin', 'Active', created, last_used)\n key.audit(10, 11, 2, 1)\n assert key.audit_state == 'expire'\n key.audit(60, 80, 2, 1)\n assert key.audit_state == 'stagnant_expire'", "def inspect_storage_objects_for_debugging(k8s_ctx: str, dry_run: bool = False):\n cmd = f'kubectl --context={k8s_ctx} get pv,pvc -o=NAME'\n for storage_obj in run_commands([cmd], dry_run):\n cmd = f'kubectl --context={k8s_ctx} describe {storage_obj}'\n if dry_run:\n logging.debug(cmd)\n else:\n p = safe_exec(cmd)\n if p.stdout:\n for line in p.stdout.decode().split('\\n'):\n if line.startswith(\"Status\") or line.startswith(\"Finalizers\"):\n logging.debug(f'{storage_obj} {line}')", "def GetLogs(self):\n raise NotImplementedError()", "def log_model_metadata(model_uid, schema, db_conn):\n df = pd.DataFrame({\n 'training_timestamp': [get_current_timestamp()],\n 'model_uid': [model_uid]\n })\n df.to_sql(name='model_metadata', schema=schema, con=db_conn, if_exists='append', index=False)", "def logtool(self, action, **options):\n pass", "def update_logs(event, log, action_log, error_log):\n\tif event[\"type\"] == \"error\":\n\t\t#Update the error log file\n\telse:\n\t\t# event[\"type\"] == \"action\"\n\t\t#Update action file", "def ocsaudit_rest_log_command(method, url, url_args, username):\n \n try:\n if method == \"GET\":\n type = cmd_type.type_get\n elif method == \"POST\":\n type = cmd_type.type_post\n elif method == \"PATCH\":\n type = cmd_type.type_patch\n elif method == \"DELETE\":\n type = cmd_type.type_delete\n else:\n type = cmd_type.type_unknown\n print \"Unidentified command type {0}\".format(method)\n \n url = url.split(\"/v1/\",1)[1]\n args = \" \".join(url_args)\n \n ocsaudit_log_command(username, type, 
cmd_interface.interface_rest, \n url, args)\n except Exception as e:\n print \"ocsaudit_rest_log_command Exception {0}\".format(e)", "def audit(cls):\n old_save = cls.save\n old_delete = cls.delete\n def save(self, *arg, **kw):\n from middleware import get_current_user\n user = get_current_user()\n if user is not None:\n self.last_user_id = user.id\n return old_save(self, *arg, **kw)\n\n\n def delete(self, *arg, **kw):\n from middleware import get_current_user\n user = get_current_user()\n if user is not None:\n self.last_user_id = user.id\n cls.save(self)\n return old_delete(self, *arg, **kw)\n cls.save = save\n cls.delete = delete\n cls.last_user_id = models.IntegerField(null=True, blank=True, editable=False)\n return cls", "def logentry(jobid, label, typ, content=None, path=None):\n ud = str(uuid.uuid4())\n db.logs.save({\"uuid\":ud, \"jobid\":jobid, \"label\":label, \"type\":typ, \"content\":content, \"date\":tstamp(), \"containerurl\":path})", "def __enable_log_streaming():\n # Enable BigQuery service for the project:\n run_command('gcloud services enable bigquery --project {}'.format(PROJECT_ID))\n\n # Create the BigQuery dataset to store the logs:\n # For details refer to https://cloud.google.com/bigquery/docs/datasets#bigquery-create-dataset-cli\n run_command('bq mk --data_location {} --description \\\"Cloud logging export.\\\" {}'\n .format(LOGS_LOCATION, LOGS_SINK_DATASET_ID), 'already exists')\n\n # Set up a log sink to the above-created BigQuery dataset:\n output_message = run_command('gcloud logging sinks create {} {} --project {} \\\n --log-filter=\\'resource.type=\\\"gcs_bucket\\\" OR resource.type=\\\"project\\\"\\''\n .format(LOGS_SINK_NAME, LOGS_SINK_DESTINATION, PROJECT_ID))\n\n # The service account that will be writing to BQ dataset is listed by the previous command.\n # After extracting the service account from the message, you need to give it BQ Writer role to that service account.\n __set_dataset_access( __find_service_account_in_message(output_message) )" ]
[ "0.589836", "0.57862264", "0.5479396", "0.53276616", "0.5070259", "0.50430673", "0.5013744", "0.49703205", "0.49509078", "0.4943958", "0.49248308", "0.49248308", "0.49054325", "0.4890508", "0.48703638", "0.48651615", "0.48189864", "0.48181394", "0.48126942", "0.48086238", "0.47714564", "0.47645283", "0.47190982", "0.469417", "0.4682236", "0.4658147", "0.46523142", "0.46427134", "0.4627733", "0.4600334", "0.4583746", "0.45829535", "0.45610845", "0.45487535", "0.45369792", "0.45296195", "0.45200622", "0.45158532", "0.45135257", "0.45087272", "0.44933844", "0.44896442", "0.44780484", "0.44596392", "0.44573265", "0.44514826", "0.44424143", "0.44398478", "0.44218993", "0.4421767", "0.4411959", "0.4401724", "0.43963262", "0.43848306", "0.43804586", "0.43776047", "0.4370529", "0.43691784", "0.436865", "0.4366847", "0.4360559", "0.4349807", "0.434768", "0.4340404", "0.43367457", "0.4336186", "0.4335609", "0.4328269", "0.43278924", "0.43219286", "0.43152776", "0.43145692", "0.43139264", "0.4311371", "0.43076736", "0.4305778", "0.43036142", "0.4301282", "0.4298705", "0.4298513", "0.4293655", "0.42844364", "0.42766058", "0.42735416", "0.42704204", "0.42666212", "0.42655167", "0.4263765", "0.4263708", "0.42612353", "0.42610663", "0.42585126", "0.4258335", "0.42568165", "0.42554635", "0.42527717", "0.42525977", "0.42492634", "0.42472807", "0.42348155" ]
0.42488885
98
This operation is applicable only to general-purpose local-disk and dedicated local-disk instances. This operation depends on the audit log feature of ApsaraDB for MongoDB. You can enable the audit log feature based on your business needs. For more information, see [Enable the audit log feature](~~59903~~). Starting from January 6, 2022, the official edition of the audit log feature has been launched in all regions, and new applications for the free trial edition have ended. For more information, see [Notice on official launch of the pay-as-you-go audit log feature and end of applications for the free trial edition](~~377480~~).
async def describe_mongo_dblog_config_async( self, request: dds_20151201_models.DescribeMongoDBLogConfigRequest, ) -> dds_20151201_models.DescribeMongoDBLogConfigResponse: runtime = util_models.RuntimeOptions() return await self.describe_mongo_dblog_config_with_options_async(request, runtime)
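The query and async wrapper above describe the DescribeMongoDBLogConfig operation, which returns the MongoDB audit log configuration once the feature is enabled on a supported instance. The sketch below shows how the wrapper might be invoked; it is an illustration under stated assumptions, not the SDK's documented example. The endpoint, credentials, and instance ID are placeholders, and the `dbinstance_id` field on `DescribeMongoDBLogConfigRequest` is assumed from the operation's DBInstanceId parameter.

# Minimal, hypothetical usage sketch for the async wrapper above. Assumes the
# standard alibabacloud_dds20151201 client construction; endpoint, credentials,
# instance ID, and the `dbinstance_id` request field are assumptions/placeholders.
import asyncio

from alibabacloud_dds20151201.client import Client
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_tea_openapi import models as open_api_models


async def main() -> None:
    # Build a client; in practice credentials should come from the environment
    # or a credentials provider rather than literals.
    config = open_api_models.Config(
        access_key_id='<your-access-key-id>',
        access_key_secret='<your-access-key-secret>',
        endpoint='mongodb.aliyuncs.com',  # assumed product endpoint
    )
    client = Client(config)

    # Query the audit log (MongoDB log) configuration of a single instance.
    request = dds_20151201_models.DescribeMongoDBLogConfigRequest(
        dbinstance_id='dds-bp1example',  # placeholder instance ID
    )
    response = await client.describe_mongo_dblog_config_async(request)
    print(response.body)


if __name__ == '__main__':
    asyncio.run(main())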
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def enable_audit_monitoring():\n __enable_data_access_logging()\n __enable_log_streaming()\n __create_audit_alerts()\n __get_incidents_history()", "def __enable_data_access_logging():\n _tempFile = \"tmp_audit_config.json\"\n\n auditConfig = {\n \"auditConfigs\": [\n {\n \"auditLogConfigs\": [\n {\n \"logType\": \"ADMIN_READ\"\n },\n {\n \"logType\": \"DATA_WRITE\"\n },\n {\n \"logType\": \"DATA_READ\"\n }\n ],\n \"service\": \"allServices\",\n }\n ]\n }\n\n # get current policy\n run_command('gcloud projects get-iam-policy {} --format=json >>{}'.format(PROJECT_ID, _tempFile))\n\n # merge it with above-defined config\n merge_JSON(auditConfig, _tempFile)\n\n # set the policy\n run_command('gcloud projects set-iam-policy {} {}'.format(PROJECT_ID, _tempFile))\n\n # delete the temp file\n run_command('rm {}'.format(_tempFile))", "def add_audit(self, entity_name, object_name, operation,\n data, auth_ctx, session):", "def test_mongodb_oplog_origin(sdc_builder, sdc_executor, mongodb):\n pipeline_builder = sdc_builder.get_pipeline_builder()\n pipeline_builder.add_error_stage('Discard')\n\n time_now = int(time.time())\n mongodb_oplog = pipeline_builder.add_stage('MongoDB Oplog')\n database_name = get_random_string(ascii_letters, 10)\n # Specify that MongoDB Oplog needs to read changes occuring after time_now.\n mongodb_oplog.set_attributes(collection='oplog.rs', initial_timestamp_in_secs=time_now, initial_ordinal=1)\n\n trash = pipeline_builder.add_stage('Trash')\n mongodb_oplog >> trash\n pipeline = pipeline_builder.build().configure_for_environment(mongodb)\n\n try:\n # Insert documents in MongoDB using PyMongo.\n # First a database is created. Then a collection is created inside that database.\n # Then documents are inserted in that collection.\n mongodb_database = mongodb.engine[database_name]\n mongodb_collection = mongodb_database[get_random_string(ascii_letters, 10)]\n input_rec_count = 6\n inserted_list = mongodb_collection.insert_many([{'x': i} for i in range(input_rec_count)])\n assert len(inserted_list.inserted_ids) == input_rec_count\n\n sdc_executor.add_pipeline(pipeline)\n snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot\n sdc_executor.stop_pipeline(pipeline)\n\n assert len(snapshot[mongodb_oplog].output) == input_rec_count\n for record in list(enumerate(snapshot[mongodb_oplog].output)):\n assert record[1].value['value']['o']['value']['x']['value'] == str(record[0])\n # Verify the operation type is 'i' which is for 'insert' since we inserted the records earlier.\n assert record[1].value['value']['op']['value'] == 'i'\n assert record[1].value['value']['ts']['value']['timestamp']['value'] > time_now\n\n finally:\n logger.info('Dropping %s database...', database_name)\n mongodb.engine.drop_database(database_name)", "def test_otoroshi_controllers_adminapi_events_controller_audit_events(self):\n pass", "def log(cmdDict, kind, error=None):\n\n cmdDict['timestamp'] = str(int(time.time() * 1000))\n cmdDict['logType'] = kind\n cmdDict['_id'] = cmdDict['transactionNumber'] + cmdDict['user'] + cmdDict['command'] + cmdDict['timestamp'] + cmdDict['logType']\n\n if (error != None):\n cmdDict['errorMessage'] = error\n\n try:\n Database.insert(TRANSACT_COLLECT, cmdDict)\n\n except pymongo.errors.PyMongoError as err:\n print(f\"ERROR! Could not log command into database. 
Failed with: {err}\")", "def get_audit(self, query, session):\n raise NotImplementedError()", "def test_old_access(self):\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('username', 'keyid', 'Active', created, last_used)\n key.audit(60, 80, 10, 9)\n assert key.audit_state == 'stagnant_expire'", "def MongoLog(self, request_number, process, log_message):\n try:\n print(\"Attempting to connect to MongoDB...\")\n client = MongoClient('localhost', 27017)\n db = client.database\n collection = db.logging_database\n\n status_log = {\"Request_No\": request_number, \"Brewing_Process\": process, \"Log_Message\": log_message,\n \"Time\": datetime.datetime.now()}\n\n try:\n collection.insert_one(status_log)\n except TypeError: # Error Handling for MongoDB versions that do not implement insert_one() method\n collection.insert(status_log)\n\n print(status_log)\n except Exception as e:\n print(\"MongoDB connection Error:\" + str(e))", "def disable_disk_logging():\r\n app.set_option(_DISK_LOG_LEVEL_OPTION, LogOptions._LOG_LEVEL_NONE_KEY, force=True)", "def view_audit_log(_) -> int:\n return 1 << 7", "def view_audit_log(_) -> int:\n return 1 << 7", "def test_normal(self):\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n k = Key('username', 'keyid', 'Active', created, last_used)\n k.audit(60, 80, 20, 19)\n assert k.creation_age == 15\n assert k.audit_state == 'good'", "def check_oplog(con, warning, critical, perf_data):\n warning = warning or 24 \n critical = critical or 4\n try:\n db = con.local\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) \n if (db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) != None) :\n oplog = \"oplog.rs\";\n else :\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"})\n if (db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"}) != None) :\n oplog = \"oplog.$main\";\n else :\n message = \"neither master/slave nor replica set replication detected\";\n return check_levels(None,warning,critical,message)\n\n try:\n set_read_preference(con.admin)\n data=con.local.command(pymongo.son_manipulator.SON([('collstats',oplog)]))\n except:\n data = con.admin.command(son.SON([('collstats',oplog)]))\n\n ol_size=data['size']\n ol_storage_size=data['storageSize']\n ol_used_storage=int(float(ol_size)/ol_storage_size*100+1)\n ol=con.local[oplog]\n firstc = ol.find().sort(\"$natural\",pymongo.ASCENDING).limit(1)[0]['ts']\n lastc = ol.find().sort(\"$natural\",pymongo.DESCENDING).limit(1)[0]['ts']\n time_in_oplog= (lastc.as_datetime()-firstc.as_datetime())\n message=\"Oplog saves \"+ str(time_in_oplog) + \" %d%% used\" %ol_used_storage \n try: #work starting from python2.7\n hours_in_oplog= time_in_oplog.total_seconds()/60/60\n except:\n hours_in_oplog= float(time_in_oplog.seconds + time_in_oplog.days * 24 * 3600)/60/60\n approx_level=hours_in_oplog*100/ol_used_storage\n message+=performance_data(perf_data,[(\"%.2f\" % hours_in_oplog,'oplog_time',warning,critical),(\"%.2f \" % approx_level, 'oplog_time_100_percent_used')])\n return check_levels(-approx_level,-warning,-critical,message)\n\n except Exception, e:\n return exit_with_general_critical(e)", "def log(msg):\n\n print('datastore: %s' % msg)", "def AddSqlServerAudit(parser):\n parser.add_argument(\n '--audit-bucket-path',\n required=False,\n help=(\n 'The location, as a Cloud Storage 
bucket, to which audit files are '\n 'uploaded. The URI is in the form gs://bucketName/folderName. Only '\n 'available for SQL Server instances.'\n ),\n )\n\n parser.add_argument(\n '--audit-retention-interval',\n default=None,\n type=arg_parsers.Duration(upper_bound='7d'),\n required=False,\n help=(\n 'The number of days for audit log retention on disk, for example, 3d'\n 'for 3 days. Only available for SQL Server instances.'\n ),\n )\n\n parser.add_argument(\n '--audit-upload-interval',\n default=None,\n type=arg_parsers.Duration(upper_bound='720m'),\n required=False,\n help=(\n 'How often to upload audit logs (audit files), for example, 30m'\n 'for 30 minutes. Only available for SQL Server instances.'\n ),\n )", "def setup_tap_mongodb(self):\n db_script = os.path.join(DIR, '..', '..', 'db', 'tap_mongodb.sh')\n self._run_command(db_script)", "def logs(self, container: Container) -> str:", "def add_log4mongo():\n remove_log4mongo()\n root = logging.getLogger()\n root.addHandler(MongoHandler())", "def _add_connection_info(report_kvs, db):\n report_kvs['Flavor'] = 'mongodb'\n report_kvs['Database'] = db.name\n report_kvs['RemoteHost'] = db.connection.host\n report_kvs['RemotePort'] = db.connection.port", "def log_db():\n return pymongo.MongoClient(SCITRAN_PERSISTENT_DB_LOG_URI).get_database()", "def update_log(ident, document):\n logs_col.update_one({\"_id\": ident}, {\"$set\": document})", "def test_getAuditLogsWithNoParams(self):\r\n logs = self.client.getAuditLogs()\r\n return logs", "def test_host_file_audit(host):\n with host.sudo():\n host.run(\"touch /etc/hosts\")\n audit_log = host.run(\"journalctl -u auditd --since \\\"10 seconds ago\\\" | grep \\\"/etc/hosts\\\"\")\n assert audit_log.stdout", "def audit(msg):\n global auditLogger\n if auditLogger is not None:\n auditLogger.info(msg)", "def get_log(ident):\n return logs_col.find_one({\"_id\": ObjectId(ident)})", "def audit(audit_code, audit_str, request, system_initiated=False):\n try:\n db_path, err = config.get_db_path()\n if err:\n raise Exception(err)\n if system_initiated is False:\n ip, err = networking.get_client_ip(request.META)\n if err:\n raise Exception(err)\n now, err = datetime_utils.get_epoch(when='now', num_previous_days=0)\n if err:\n raise Exception(er)\n if system_initiated:\n username = 'System'\n source_ip = 'System'\n else:\n username = request.user.username\n source_ip, err = networking.get_client_ip(request.META)\n if err:\n raise Exception(err)\n command_list = []\n cmd = [\n 'insert into audit(audit_time, username, source_ip, audit_code, audit_str) values (?,?,?,?,?)', (now, username, source_ip, audit_code, audit_str,)]\n command_list.append(cmd)\n audit_id, err = db.execute_iud(db_path, command_list, get_rowid=True)\n if err:\n raise Exception(err)\n ret, err = event_notifications.record_event_notification_holding(\n event_id=audit_id, event_type_id=2)\n if err:\n raise Exception(err)\n\n except Exception, e:\n return False, 'Error performing an audit operation : %s' % str(e)\n else:\n return True, None", "def event_log(self):\n pass", "def audit_log(self, account_id):\n from pureport_client.commands.accounts.audit_log import Command\n return Command(self.client, account_id)", "def disk_log_level():\r\n if LogOptions._DISK_LOG_LEVEL is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_LEVEL", "def upload_logs():\n return {\n 'page': 'upload_logs',\n 'raw_logs': '',\n }", "def test_mongo_logging_client_persists_log():\n\n 
error_message = \"This is a test message.\"\n logger = LoggingService(console_output=True)\n\n result = logger.log(LogEntry(LogLevel.ERROR, __name__, error_message))\n logger.log(LogEntry(LogLevel.WARN, __name__, error_message))\n logger.log(LogEntry(LogLevel.INFO, __name__, error_message))\n logger.log(LogEntry(LogLevel.DEBUG, __name__, error_message))\n\n assert result.message == error_message", "def to(cls, database=\"AUDIT\", collection=\"log\",\n mongodb_uri=\"mongodb://localhost:27017\", level=logging.NOTSET):\n return cls(mongodb_uri, database, collection, level)", "def dump_to_log(self):\n # self._send_request(\"/dumpToLog\")\n pass", "def audit(self):\n self.ping()", "def db_for_write(self, model, **hints):\n if model == FilterRecordingTracking:\n return 'db_rest_api'\n return None", "async def addAudit(self, name, description, status, type, data, userid) -> CreateAuditResponse:\n return await self.stub.CreateAudit(\n CreateAuditRequest(name=name,\n description=description, type=type, status=status, data=data, created_by=userid\n ))", "async def audit_actions(self, ctx: Context) -> None:\n\n if ctx.invoked_subcommand is None:\n await ctx.send_help('auditaction')", "def electrolytedb():\n if not check_for_mongodb():\n pytest.skip(\"MongoDB is required\")", "def setup_logging():\n log.setup('keystone')", "def dwl_auditlog_entry_report(session):\n url = session.get_url('audit', 'dwl')\n\n req = re.Request('GET', url)\n\n return session.send_recv(req, 'Audit log entry report downloaded.')", "def logQuote(cmdDict):\n\n try:\n Database.insert(TRANSACT_COLLECT, cmdDict)\n\n except pymongo.errors.PyMongoError as err:\n print(f\"ERROR! Could not log command into database. Failed with: {err}\")", "def cf_log_analytics_data_plane(cli_ctx, _):\n from azure.loganalytics import LogAnalyticsDataClient\n from azure.cli.core._profile import Profile\n profile = Profile(cli_ctx=cli_ctx)\n cred, _, _ = profile.get_login_credentials(\n resource=cli_ctx.cloud.endpoints.log_analytics_resource_id)\n api_version = 'v1'\n return LogAnalyticsDataClient(cred,\n base_url=cli_ctx.cloud.endpoints.log_analytics_resource_id + '/' + api_version)", "def test_disabled(self, monkeypatch):\n monkeypatch.setenv('ENABLE_AUTO_EXPIRE', 'true')\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('user2', 'ldasfkk', 'Inactive', created, last_used)\n key.audit(10, 11, 10, 8)\n assert key.audit_state == 'disabled'\n key.audit(60, 80, 30, 20)\n assert key.audit_state == 'disabled'", "def test_aud_from_log_ignores_index():\n assert True", "def main():\n\n args = cli.get_args()\n\n audit = {}\n try:\n service_instance,content = connect_vc(args.host,args.user,args.password,args.port)\n\n if sys.stdout.isatty():\n print(\"vCenter: %s\" % args.host)\n \n content = service_instance.RetrieveContent()\n\n container = content.rootFolder # starting point to look into\n datacenters = get_datacenters(content)\n for dc in datacenters:\n datacenters[dc]['clusters'] = get_clusters(datacenters[dc]['dc'])\n\n datacenters[dc]['vms'] = get_vms(datacenters[dc]['dc'].vmFolder)\n \n get_nets(dc)\n get_dstores(dc)\n\n vmcount=0\n \n for dc in datacenters:\n for vm in sorted(datacenters[dc]['vms'],key=lambda s: s.lower()):\n vmcount+=1\n v = datacenters[dc]['vms'][vm]\n c = find_cluster(datacenters[dc]['clusters'],v.runtime.host.name)\n vort = \"Template\" if v.summary.config.template == True else \"VM\"\n audit[v.name]={}\n 
audit[v.name]['datacenter'] = dc\n audit[v.name]['cluster'] = c\n audit[v.name]['type'] = vort\n audit[v.name]['hostname'] = v.summary.guest.hostName\n audit[v.name]['guestid'] = v.config.guestId\n audit[v.name]['fullname'] = v.summary.config.guestFullName\n audit[v.name]['state'] = v.runtime.powerState\n audit[v.name]['ip'] = v.guest.ipAddress\n if sys.stdout.isatty():\n print(vmcount,\"Guests processed\",end='\\r')\n sys.stdout.flush()\n# print(\"%-15s:%-10s %-8s %-30s %-30s %s %s %s %s\" % (dc, c, vort,v.name,v.summary.guest.hostName, v.config.guestId, v.summary.config.guestFullName,v.guest.guestState,v.guest.ipAddress ))\n #print vort, v.name, v.summary.guest.hostName, v.guest.guestId, v.summary.config.guestFullName,v.guest.guestState,v.guest.ipAddress #,v.summary\n# print(\"\\ncount:\",vmcount)\n \n print(json.dumps(audit, indent=4, separators=(',', ': ')))\n \n except vmodl.MethodFault as error:\n print(\"Caught vmodl fault : \" + error.msg)\n return -1\n\n return 0", "def get_logdb_quota():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<show><system><logdb-quota></logdb-quota></system></show>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def test_mongodb_origin_simple_with_BSONBinary(sdc_builder, sdc_executor, mongodb):\n\n ORIG_BINARY_DOCS = [\n {'data': binary.Binary(b'Binary Data Flute')},\n {'data': binary.Binary(b'Binary Data Oboe')},\n {'data': binary.Binary(b'Binary Data Violin')}\n ]\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n pipeline_builder.add_error_stage('Discard')\n\n mongodb_origin = pipeline_builder.add_stage('MongoDB', type='origin')\n mongodb_origin.set_attributes(capped_collection=False,\n database=get_random_string(ascii_letters, 5),\n collection=get_random_string(ascii_letters, 10))\n\n trash = pipeline_builder.add_stage('Trash')\n mongodb_origin >> trash\n pipeline = pipeline_builder.build().configure_for_environment(mongodb)\n\n try:\n # MongoDB and PyMongo add '_id' to the dictionary entries e.g. docs_in_database\n # when used for inserting in collection. Hence the deep copy.\n docs_in_database = copy.deepcopy(ORIG_BINARY_DOCS)\n\n # Create documents in MongoDB using PyMongo.\n # First a database is created. 
Then a collection is created inside that database.\n # Then documents are created in that collection.\n logger.info('Adding documents into %s collection using PyMongo...', mongodb_origin.collection)\n mongodb_database = mongodb.engine[mongodb_origin.database]\n mongodb_collection = mongodb_database[mongodb_origin.collection]\n insert_list = [mongodb_collection.insert_one(doc) for doc in docs_in_database]\n assert len(insert_list) == len(docs_in_database)\n\n # Start pipeline and verify the documents using snaphot.\n sdc_executor.add_pipeline(pipeline)\n snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot\n sdc_executor.stop_pipeline(pipeline)\n rows_from_snapshot = [{'data': str(record.value2['data'])} for record in snapshot[mongodb_origin].output]\n\n assert rows_from_snapshot == [{'data': str(record.get('data'))} for record in ORIG_BINARY_DOCS]\n\n finally:\n logger.info('Dropping %s database...', mongodb_origin.database)\n mongodb.engine.drop_database(mongodb_origin.database)", "def add_log(self):\n self.stack = []\n diff = self.diff(self.original, self.doc)\n entry = {\"_id\": utils.get_iuid(),\n \"doctype\": constants.DOCTYPE_LOG,\n \"docid\": self.doc[\"_id\"],\n \"diff\": diff,\n \"timestamp\": utils.get_time()}\n self.modify_log_entry(entry)\n if hasattr(flask.g, \"current_user\") and flask.g.current_user:\n entry[\"username\"] = flask.g.current_user[\"username\"]\n else:\n entry[\"username\"] = None\n if flask.has_request_context():\n entry[\"remote_addr\"] = str(flask.request.remote_addr)\n entry[\"user_agent\"] = str(flask.request.user_agent)\n else:\n entry[\"remote_addr\"] = None\n entry[\"user_agent\"] = os.path.basename(sys.argv[0])\n flask.g.db.put(entry)", "def docker_logs():\n local('docker logs {} -f'.format(project_name))", "async def view_audit_actions(self, ctx: Context) -> None:\n\n assert ctx.guild is not None # handle by `cog_check`\n\n if logging_info := (await typed_retrieve_query(\n self.bot.database,\n int,\n 'SELECT BITS FROM LOGGING WHERE GUILD_ID=?',\n (ctx.guild.id,))\n ):\n await ctx.send(embed=build_actions_embed(LoggingActions.all_enabled_actions((logging_info[0]))))\n else:\n await ctx.send('You must first set an audit channel before viewing audit actions.'\n '\\n_See `auditactions setchannel` for more information._')", "def log_image(data_category, image_name, path=None, plot=None, **kwargs):\n # always, just print in logs\n log(logging.INFO, data_category, \"AML MetricImage({})\".format(image_name))\n if data_category == DataCategory.ONLY_PUBLIC_DATA:\n # if public, ask azureml to record (if azureml attached)\n run = AmlRunWrapper()\n run.setup(attach=True)\n run.log_image(image_name, path, plot, **kwargs)\n run.flush()", "def print_aldb(service):\n # For now this sends logs to the log file.\n # Future direction is to create an INSTEON control panel.\n entity_id = service.data[CONF_ENTITY_ID]\n signal = f\"{entity_id}_{SIGNAL_PRINT_ALDB}\"\n dispatcher_send(hass, signal)", "def install_mongo ( vpc_conn, ec2_conn, cloudwatch_conn, vpc, base_name, aws_account_type, base_topicarn, params ):\n app_type = 'MONGO'\n mongo_keypair = get_keypair_name( aws_account_type, vpc.region.name, app_type )\n print mongo_keypair\n mongo_sec_grp_name = get_secgrp_name( base_name, app_type )\n \n print \"mongo_sec_grp_name\" + mongo_sec_grp_name\n \n mongo_sec_grp = find_secgrp(ec2_conn, mongo_sec_grp_name)\n print mongo_sec_grp\n \n if not mongo_sec_grp :\n mongo_sec_grp = create_secgrp( ec2_conn,\n vpc,\n mongo_sec_grp_name,\n 
'Controls access to the ' + app_type )\n print mongo_sec_grp \n \n remove_all_rules( ec2_conn, [ mongo_sec_grp ],deep = True,base_name = base_name)\n grant_ssh_access( ec2_conn, [ mongo_sec_grp ], find_group( ec2_conn, base_name, 'NAT' ) )\n grant_grp_self_access ( ec2_conn, mongo_sec_grp, 0, 65535, protocol = 'tcp' )\n \n mongo_ami_name = base_name + '-' + app_type\n \n mongo_ami = get_ami_by_name( ec2_conn, mongo_ami_name )\n if not mongo_ami :\n mongo_ami = get_ami_by_name( ec2_conn, default_mongo_ami )\n\n print \"Creating mongoDB Instances\"\n \n mongo_subnets = get_vpc_subnets( vpc_conn, vpc, 'STATIC' )\n mongo_sec_grps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ mongo_sec_grp_name ] } )\n \n primary_ip = get_primary_node(mongo_subnets)\n mongo_instances = []\n mongo_config = get_mongo_config(mongo_subnets)\n \n user_data = None\n primary_instance_id = None\n primary_subnet = None\n for subnet in mongo_subnets :\n cidr_block = subnet.cidr_block\n \n if cidr_block == primary_ip :\n primary_subnet = subnet\n \n # First launch the secondary instances\n if cidr_block != primary_ip :\n instance_private_ip = get_static_ip(subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo )\n\n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )\n\n user_data = base64.b64encode(mongo_config)\n \n print \"Launching primary mongo instance\"\n print \"primary_subnet \" +primary_subnet.cidr_block\n instance_private_ip = get_static_ip(primary_subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = primary_subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo)\n \n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )", "def create_sync_entry(ts, coll, idx):\n sync_log = connection.ElasticLogs()\n sync_log.ts = ts\n sync_log.coll = unicode(coll)\n sync_log.idx = unicode(idx)\n sync_log.save()\n return True", "def log_event(event):\n LOGGER.info(\"====================================================\")\n LOGGER.info(event)\n LOGGER.info(\"====================================================\")", "def enable_access_logging(self) -> Optional[bool]:\n return pulumi.get(self, \"enable_access_logging\")", "def modify_audit_log_filter_with_options(\n self,\n request: dds_20151201_models.ModifyAuditLogFilterRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ModifyAuditLogFilterResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.filter):\n query['Filter'] = request.filter\n if not 
UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_type):\n query['RoleType'] = request.role_type\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ModifyAuditLogFilter',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ModifyAuditLogFilterResponse(),\n self.call_api(params, req, runtime)\n )", "def __init__(self) -> None:\n name = \"Ensure that RDS Cluster audit logging is enabled for MySQL engine\"\n id = \"CKV_AWS_325\"\n supported_resources = (\"aws_rds_cluster\",)\n categories = (CheckCategories.LOGGING,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)", "def test_rotate_access(self):\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('username', 'keyid', 'Active', created, last_used)\n key.audit(60, 80, 20, 10)\n assert key.audit_state == 'stagnant'", "def test_read_namespaced_deployment_log_log(self):\n pass", "def test_resourcelog(client, test_database, session):\n test_database.refresh()\n\n usersession_id = 1\n remote_addr = \"127.0.0.1\"\n\n # Without payload\n r = client.get(\"/api/v1/config/\")\n statuscode = r.status_code\n response_size = int(r.headers.get(\"Content-Length\"))\n\n rlogs = session.query(log.ResourceLog).all()\n assert len(rlogs) == 2 # 2 entries since API did a login as first entry\n\n rl = rlogs[-1]\n assert rl.remote_addr == remote_addr\n assert rl.usersession_id == usersession_id\n assert rl.method == \"GET\"\n assert rl.resource == \"/api/v1/config/\"\n assert rl.statuscode == statuscode\n assert rl.response_size == response_size\n assert rl.payload is None\n assert rl.payload_size == 0\n assert rl.query == \"\"\n assert rl.duration > 0\n assert isinstance(rl.time, datetime.datetime)\n\n # With payload\n\n payload_data = {\n \"allele_ids\": [1],\n \"gp_name\": \"HBOCUTV\",\n \"gp_version\": \"v01\",\n \"referenceassessments\": [],\n }\n r = client.post(\"/api/v1/acmg/alleles/?dummy=data\", payload_data)\n payload = json.dumps(payload_data)\n payload_size = len(payload)\n statuscode = r.status_code\n response_size = int(r.headers.get(\"Content-Length\"))\n\n rlogs = session.query(log.ResourceLog).all()\n assert len(rlogs) == 4 # 4 since /currentuser is called to check whether logged in\n\n rl = rlogs[-1]\n assert statuscode == 200\n assert rl.remote_addr == remote_addr\n assert rl.usersession_id == usersession_id\n assert rl.method == \"POST\"\n assert rl.resource == \"/api/v1/acmg/alleles/\"\n assert rl.statuscode == statuscode\n assert rl.response_size == response_size\n assert rl.payload == payload\n assert rl.payload_size == payload_size\n assert rl.query == \"dummy=data\"\n assert rl.duration > 0\n assert isinstance(rl.time, datetime.datetime)\n\n # Make 
sure /login doesn't log passwords\n payload_data = {\"username\": \"abc\", \"password\": \"123\"}\n r = client.post(\"/api/v1/users/actions/login/\", payload_data)\n statuscode = r.status_code\n response_size = int(r.headers.get(\"Content-Length\"))\n\n rlogs = session.query(log.ResourceLog).all()\n assert len(rlogs) == 6 # 6 since /currentuser is called to check whether logged in\n\n rl = rlogs[-1]\n assert statuscode == 401 # User doesn't exist\n assert rl.remote_addr == remote_addr\n assert rl.usersession_id == usersession_id\n assert rl.method == \"POST\"\n assert rl.resource == \"/api/v1/users/actions/login/\"\n assert rl.statuscode == statuscode\n assert rl.response_size == response_size\n assert rl.payload is None\n assert rl.payload_size == 0\n assert rl.query == \"\"\n assert rl.duration > 0\n assert isinstance(rl.time, datetime.datetime)\n\n # Test logging when not logged in\n payload_data = {\n \"allele_ids\": [1],\n \"gp_name\": \"HBOCUTV\",\n \"gp_version\": \"v01\",\n \"referenceassessments\": [],\n }\n client.logout()\n r = client.post(\"/api/v1/acmg/alleles/?dummy=data\", payload_data, username=None)\n payload = json.dumps(payload_data)\n payload_size = len(payload)\n statuscode = r.status_code\n response_size = int(r.headers.get(\"Content-Length\"))\n\n rlogs = session.query(log.ResourceLog).all()\n assert len(rlogs) == 9 # logout counts as 1\n\n rl = rlogs[-1]\n assert statuscode == 403\n assert rl.remote_addr == remote_addr\n assert rl.usersession_id is None\n assert rl.method == \"POST\"\n assert rl.resource == \"/api/v1/acmg/alleles/\"\n assert rl.statuscode == statuscode\n assert rl.response_size == response_size\n assert rl.payload == payload\n assert rl.payload_size == payload_size\n assert rl.query == \"dummy=data\"\n assert isinstance(rl.time, datetime.datetime)", "def upgrade_to_21():\n\n def update_project_template(template):\n new_template = {'acquisitions': []}\n for a in template.get('acquisitions', []):\n new_a = {'minimum': a['minimum']}\n properties = a['schema']['properties']\n if 'measurement' in properties:\n m_req = properties['measurement']['pattern']\n m_req = re.sub('^\\(\\?i\\)', '', m_req)\n new_a['files']=[{'measurement': m_req, 'minimum': 1}]\n if 'label' in properties:\n l_req = properties['label']['pattern']\n l_req = re.sub('^\\(\\?i\\)', '', l_req)\n new_a['label'] = l_req\n new_template['acquisitions'].append(new_a)\n\n return new_template\n\n def dm_v2_updates(cont_list, cont_name):\n for container in cont_list:\n\n query = {'_id': container['_id']}\n update = {'$rename': {'metadata': 'info'}}\n\n if cont_name == 'projects' and container.get('template'):\n new_template = update_project_template(json.loads(container.get('template')))\n update['$set'] = {'template': new_template}\n\n\n if cont_name == 'sessions':\n update['$rename'].update({'subject.metadata': 'subject.info'})\n\n\n measurement = None\n modality = None\n info = None\n if cont_name == 'acquisitions':\n update['$unset'] = {'instrument': '', 'measurement': ''}\n measurement = container.get('measurement', None)\n modality = container.get('instrument', None)\n info = container.get('metadata', None)\n if info:\n config.db.acquisitions.update_one(query, {'$set': {'metadata': {}}})\n\n\n # From mongo docs: '$rename does not work if these fields are in array elements.'\n files = container.get('files')\n if files is not None:\n updated_files = []\n for file_ in files:\n file_['info'] = {}\n if 'metadata' in file_:\n file_['info'] = file_.pop('metadata', None)\n if 'instrument' in 
file_:\n file_['modality'] = file_.pop('instrument', None)\n if measurement:\n # Move the acquisition's measurement to all files\n if file_.get('measurements'):\n file_['measurements'].append(measurement)\n else:\n file_['measurements'] = [measurement]\n if info and file_.get('type', '') == 'dicom':\n # This is going to be the dicom header info\n updated_info = info\n updated_info.update(file_['info'])\n file_['info'] = updated_info\n if modality and not file_.get('modality'):\n file_['modality'] = modality\n\n updated_files.append(file_)\n if update.get('$set'):\n update['$set']['files'] = updated_files\n else:\n update['$set'] = {'files': updated_files}\n\n result = config.db[cont_name].update_one(query, update)\n\n query = {'$or':[{'files.metadata': { '$exists': True}},\n {'metadata': { '$exists': True}},\n {'files.instrument': { '$exists': True}}]}\n\n dm_v2_updates(config.db.collections.find(query), 'collections')\n\n query['$or'].append({'template': { '$exists': True}})\n dm_v2_updates(config.db.projects.find({}), 'projects')\n\n query['$or'].append({'subject': { '$exists': True}})\n dm_v2_updates(config.db.sessions.find(query), 'sessions')\n\n query['$or'].append({'instrument': { '$exists': True}})\n query['$or'].append({'measurement': { '$exists': True}})\n dm_v2_updates(config.db.acquisitions.find(query), 'acquisitions')", "def allow_map_to_audit(self):\n return self.audit_id is None and self.audit is None", "def search_log(doc_type, query):\n if query:\n body = {\n \"from\": 0,\n \"size\": 50,\n \"sort\": [\n {\n \"created_at\": {\n \"order\": \"desc\"\n }\n }\n ],\n \"query\": {\n \"term\": {\n \"_all\": \"the\"\n }\n }\n }\n else:\n body = {\n \"from\": 0,\n \"size\": 50,\n \"sort\": [\n {\n \"created_at\": {\n \"order\": \"desc\"\n }\n }\n ]\n }\n if doc_type:\n print \"condition 1 true\"\n res = es.search(index=\"logs\", doc_type=str(doc_type).strip(), body=body)\n else:\n res = es.search(index=\"logs\", body=body)\n\n data = []\n if not res.get('timed_out'):\n for item in res[\"hits\"][\"hits\"]:\n data.append({\n 'client_ip': item['_source'].get('client_ip'),\n 'client': item['_source'].get('client'),\n 'log': item['_source'].get('log'),\n 'service': item['_source'].get('service'),\n })\n response = {\"data\": data}\n return response", "def test_get(self, init_db, audit):\n assert Audit.get(audit.id) == audit", "def test_log_custom_logger(caplog, test_df):\n caplog.clear()\n\n logger_name = \"my_custom_logger\"\n\n my_logger = logging.getLogger(logger_name)\n\n @log_step(print_fn=my_logger.info)\n def do_nothing(df, *args, **kwargs):\n return df\n\n with caplog.at_level(logging.INFO):\n test_df.pipe(do_nothing)\n\n assert logger_name in caplog.text", "async def modlog(self, ctx):\r\n if ctx.invoked_subcommand is None:\r\n entity=self.bot.get_command(\"modlog\")\r\n p = await HelpPaginator.from_command(ctx, entity)\r\n await p.paginate()\r\n server = ctx.guild\r\n if str(server.id) not in self._logs:\r\n self._logs[str(server.id)] = {}\r\n dataIO.save_json(self._logs_file, self._logs)\r\n if \"channel\" not in self._logs[str(server.id)]:\r\n self._logs[str(server.id)][\"channel\"] = None\r\n dataIO.save_json(self._logs_file, self._logs)\r\n if \"toggle\" not in self._logs[str(server.id)]:\r\n self._logs[str(server.id)][\"toggle\"] = False\r\n dataIO.save_json(self._logs_file, self._logs)\r\n if \"case#\" not in self._logs[str(server.id)]:\r\n self._logs[str(server.id)][\"case#\"] = 0\r\n dataIO.save_json(self._logs_file, self._logs)\r\n if \"case\" not in 
self._logs[str(server.id)]:\r\n self._logs[str(server.id)][\"case\"] = {}\r\n dataIO.save_json(self._logs_file, self._logs)\r\n else:\r\n server = ctx.guild\r\n if str(server.id) not in self._logs:\r\n self._logs[str(server.id)] = {}\r\n dataIO.save_json(self._logs_file, self._logs)\r\n if \"channel\" not in self._logs[str(server.id)]:\r\n self._logs[str(server.id)][\"channel\"] = None\r\n dataIO.save_json(self._logs_file, self._logs)\r\n if \"toggle\" not in self._logs[str(server.id)]:\r\n self._logs[str(server.id)][\"toggle\"] = False\r\n dataIO.save_json(self._logs_file, self._logs)\r\n if \"case#\" not in self._logs[str(server.id)]:\r\n self._logs[str(server.id)][\"case#\"] = 0\r\n dataIO.save_json(self._logs_file, self._logs)\r\n if \"case\" not in self._logs[str(server.id)]:\r\n self._logs[str(server.id)][\"case\"] = {}\r\n dataIO.save_json(self._logs_file, self._logs)", "def test_Drive_log_count(opencl_env: cldrive_env.OpenCLEnvironment):\n logs = opencl_kernel_driver.Drive(\n \"\"\"\nkernel void A(global int* a, global int* b) {\n a[get_global_id(0)] += b[get_global_id(0)];\n}\n\"\"\",\n 16,\n 16,\n opencl_env,\n 5,\n )\n assert len(logs) == 5", "def on_a(self):\r\n self.log()", "def setup_logfile():\r\n from core.general.appinit import log_init\r\n log_init(\r\n 'general',\r\n 'django_api'\r\n )", "def record(params, git_info = {}):\n print \"recording...\"\n\n try:\n # connect to MongoDB\n # config = json.load(open(os.environ.get('HOME') + \"/sandbox/config.json\"))\n config = json.load(open(os.environ.get('HOME') + \"/LSEMS/config.json\"))\n try:\n client = MongoClient(config[\"mongodb_url\"])\n except Exception as e:\n raise Exception(\"fail to connect to given MongoDB address: \" + DB_addr)\n\n # check and run the thing\n missing = checkKeys(params, ['data_set', 'src', 'type', 'param'])\n if len(missing) != 0:\n raise Exception(\"missing attribute\"+('s' if len(missing)!=1 else '')+\": \"+str(missing))\n\n params['time'] = asctime()\n params['commit_id'] = git_info['commit_id']\n params['name'] = git_info['name']\n repo_name = git_info['repo_name']\n params['repo_name'] = repo_name\n user = verifyUser(client, git_info['name'])\n\n exp = user.find_one({'exp_name': repo_name})\n if not exp:\n print 'adding new experiment '+repo_name+'...'\n user.insert({'exp_name': repo_name, 'exp_records':[]})\n old_records = user.find_one({'exp_name': repo_name})['exp_records']\n user.update({'exp_name': repo_name}, {'$set': {'exp_records': old_records + [params]}})\n\n print params\n #user.insert(params)\n client.close()\n return True,params\n except Exception as e:\n print e\n print \"Aborting...\"\n return False,{}", "def add_audit_cols(df, changedt):\n df = df.withColumn(\"operation\", f.lit(\"I\")) \\\n .withColumn(\"processeddate\", f.current_timestamp().cast(\"String\")) \\\n .withColumn(\"changedate\", f.lit(changedt)) \\\n .withColumn('changedate_year', f.year('changedate').cast(\"String\")) \\\n .withColumn('changedate_month', f.month('changedate').cast(\"String\")) \\\n .withColumn('changedate_day', f.dayofmonth('changedate').cast(\"String\"))\n return df", "def command(ctx):\n ctx.setup_logger(format='')", "def update_audit_info(progress_controller=None):\n if progress_controller is None:\n progress_controller = ProgressControllerBase()\n progress_controller.maximum = 2\n\n from stalker.db.session import DBSession\n from stalker import LocalSession\n\n with DBSession.no_autoflush:\n local_session = LocalSession()\n logged_in_user = local_session.logged_in_user\n 
progress_controller.increment()\n\n if logged_in_user:\n # update the version updated_by\n from anima.dcc import mayaEnv\n\n m_env = mayaEnv.Maya()\n v = m_env.get_current_version()\n if v:\n v.updated_by = logged_in_user\n\n from stalker.db.session import DBSession\n\n DBSession.commit()\n progress_controller.increment()\n progress_controller.complete()", "def test_sudoers_audit(host):\n with host.sudo():\n sudoers_access = host.run(\"touch /etc/sudoers\")\n audit_log = host.run(\"journalctl -u auditd --since \\\"10 seconds ago\\\" | grep \\\"/etc/sudoers\\\"\")\n assert audit_log.stdout", "def sync_volumeaccessright_record( vac ):\n \n syndicate_caps = \"UNKNOWN\" # for exception handling\n \n # get arguments\n config = observer_core.get_config()\n principal_id = vac.owner_id.email\n volume_name = vac.volume.name\n syndicate_caps = observer_core.opencloud_caps_to_syndicate_caps( vac.cap_read_data, vac.cap_write_data, vac.cap_host_data ) \n \n logger.info( \"Sync VolumeAccessRight for (%s, %s)\" % (principal_id, volume_name) )\n \n # validate config\n try:\n observer_secret = observer_core.get_syndicate_observer_secret( config.SYNDICATE_OBSERVER_SECRET )\n except Exception, e:\n traceback.print_exc()\n logger.error(\"syndicatelib config is missing SYNDICATE_RG_DEFAULT_PORT, SYNDICATE_OBSERVER_SECRET\")\n raise e\n \n # ensure the user exists and has credentials\n try:\n rc, user = observer_core.ensure_principal_exists( principal_id, observer_secret, is_admin=False, max_UGs=1100, max_RGs=1 )\n assert rc is True, \"Failed to ensure principal %s exists (rc = %s,%s)\" % (principal_id, rc, user)\n except Exception, e:\n traceback.print_exc()\n logger.error(\"Failed to ensure user '%s' exists\" % principal_id )\n raise e\n\n # grant the slice-owning user the ability to provision UGs in this Volume\n try:\n rc = observer_core.ensure_volume_access_right_exists( principal_id, volume_name, syndicate_caps )\n assert rc is True, \"Failed to set up Volume access right for slice %s in %s\" % (principal_id, volume_name)\n \n except Exception, e:\n traceback.print_exc()\n logger.error(\"Failed to set up Volume access right for slice %s in %s\" % (principal_id, volume_name))\n raise e\n \n except Exception, e:\n traceback.print_exc()\n logger.error(\"Faoed to ensure user %s can access Volume %s with rights %s\" % (principal_id, volume_name, syndicate_caps))\n raise e\n\n return True", "def test_log_extra_custom_logger(caplog, test_df):\n caplog.clear()\n\n logger_name = \"my_custom_logger\"\n\n my_logger = logging.getLogger(logger_name)\n\n @log_step_extra(len, print_fn=my_logger.info)\n def do_nothing(df, *args, **kwargs):\n return df\n\n with caplog.at_level(logging.INFO):\n test_df.pipe(do_nothing)\n\n assert logger_name in caplog.text", "def log_all(self):\n self.save_raw()\n self.log()", "def __init__(__self__, *,\n copy_log_details_type: str,\n disk_serial_number: str,\n error_log_link: str,\n verbose_log_link: str):\n pulumi.set(__self__, \"copy_log_details_type\", 'DataBoxDisk')\n pulumi.set(__self__, \"disk_serial_number\", disk_serial_number)\n pulumi.set(__self__, \"error_log_link\", error_log_link)\n pulumi.set(__self__, \"verbose_log_link\", verbose_log_link)", "def upgrade_to_15():\n query = {}\n query['$or'] = [\n {'timestamp':''},\n {'$and': [\n {'timestamp': {'$exists': True}},\n {'timestamp': {'$not': {'$type':2}}},\n {'timestamp': {'$not': {'$type':9}}}\n ]}\n ]\n unset = {'$unset': {'timestamp': ''}}\n\n config.db.sessions.update_many(query, unset)\n 
config.db.acquisitions.update_many(query, unset)\n\n query = {'$and': [\n {'timestamp': {'$exists': True}},\n {'timestamp': {'$type':2}}\n ]}\n sessions = config.db.sessions.find(query)\n for s in sessions:\n try:\n fixed_timestamp = dateutil.parser.parse(s['timestamp'])\n except:\n config.db.sessions.update_one({'_id': s['_id']}, {'$unset': {'timestamp': ''}})\n continue\n config.db.sessions.update_one({'_id': s['_id']}, {'$set': {'timestamp': fixed_timestamp}})\n\n acquisitions = config.db.acquisitions.find(query)\n for a in acquisitions:\n try:\n fixed_timestamp = dateutil.parser.parse(a['timestamp'])\n except:\n config.db.sessions.update_one({'_id': a['_id']}, {'$unset': {'timestamp': ''}})\n continue\n config.db.sessions.update_one({'_id': a['_id']}, {'$set': {'timestamp': fixed_timestamp}})", "def clb_access_logging_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:\n elb = session.client(\"elb\")\n # ISO Time\n iso8601Time = (datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat())\n for lb in describe_clbs(cache, session):\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(lb,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n clbName = lb[\"LoadBalancerName\"]\n clbArn = f\"arn:{awsPartition}:elasticloadbalancing:{awsRegion}:{awsAccountId}:loadbalancer/{clbName}\"\n dnsName = lb[\"DNSName\"]\n lbSgs = lb[\"SecurityGroups\"]\n lbSubnets = lb[\"Subnets\"]\n lbAzs = lb[\"AvailabilityZones\"]\n lbVpc = lb[\"VPCId\"]\n clbScheme = lb[\"Scheme\"]\n # Get Attrs\n if elb.describe_load_balancer_attributes(LoadBalancerName=clbName)[\"LoadBalancerAttributes\"][\"AccessLog\"][\"Enabled\"] is False:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": clbArn + \"/classic-loadbalancer-access-logging-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": clbArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks/AWS Security Best Practices\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"MEDIUM\"},\n \"Confidence\": 99,\n \"Title\": \"[ELB.5] Classic load balancers should enable access logging\",\n \"Description\": \"Classic load balancer \"\n + clbName\n + \" does not have access logging enabled. 
Refer to the remediation instructions to remediate this behavior\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on access logging refer to the Access Logs for Your Classic Load Balancer section of the Classic Load Balancers User Guide.\",\n \"Url\": \"https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/access-log-collection.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Networking\",\n \"AssetService\": \"AWS Elastic Load Balancer\",\n \"AssetComponent\": \"Classic Load Balancer\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsElbLoadBalancer\",\n \"Id\": clbArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsElbLoadBalancer\": {\n \"DnsName\": dnsName,\n \"Scheme\": clbScheme,\n \"SecurityGroups\": lbSgs,\n \"Subnets\": lbSubnets,\n \"VpcId\": lbVpc,\n \"AvailabilityZones\": lbAzs,\n \"LoadBalancerName\": clbName\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 ID.AM-3\",\n \"NIST CSF V1.1 DE.AE-1\",\n \"NIST CSF V1.1 DE.AE-3\",\n \"NIST CSF V1.1 DE.CM-1\",\n \"NIST CSF V1.1 DE.CM-7\",\n \"NIST CSF V1.1 PR.PT-1\",\n \"NIST SP 800-53 Rev. 4 AC-2\",\n \"NIST SP 800-53 Rev. 4 AC-4\",\n \"NIST SP 800-53 Rev. 4 AU-6\",\n \"NIST SP 800-53 Rev. 4 AU-12\",\n \"NIST SP 800-53 Rev. 4 CA-3\",\n \"NIST SP 800-53 Rev. 4 CA-7\",\n \"NIST SP 800-53 Rev. 4 CA-9\",\n \"NIST SP 800-53 Rev. 4 CM-2\",\n \"NIST SP 800-53 Rev. 4 CM-3\",\n \"NIST SP 800-53 Rev. 4 CM-8\",\n \"NIST SP 800-53 Rev. 4 IR-4\",\n \"NIST SP 800-53 Rev. 4 IR-5\",\n \"NIST SP 800-53 Rev. 4 IR-8\",\n \"NIST SP 800-53 Rev. 4 PE-3\",\n \"NIST SP 800-53 Rev. 4 PE-6\",\n \"NIST SP 800-53 Rev. 4 PE-20\",\n \"NIST SP 800-53 Rev. 4 PL-8\",\n \"NIST SP 800-53 Rev. 4 SC-5\",\n \"NIST SP 800-53 Rev. 4 SC-7\",\n \"NIST SP 800-53 Rev. 
4 SI-4\",\n \"AICPA TSC CC3.2\",\n \"AICPA TSC CC6.1\",\n \"AICPA TSC CC7.2\",\n \"ISO 27001:2013 A.12.1.1\",\n \"ISO 27001:2013 A.12.1.2\",\n \"ISO 27001:2013 A.12.4.1\",\n \"ISO 27001:2013 A.12.4.2\",\n \"ISO 27001:2013 A.12.4.3\",\n \"ISO 27001:2013 A.12.4.4\",\n \"ISO 27001:2013 A.12.7.1\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.2.1\",\n \"ISO 27001:2013 A.13.2.2\",\n \"ISO 27001:2013 A.14.2.7\",\n \"ISO 27001:2013 A.15.2.1\",\n \"ISO 27001:2013 A.16.1.7\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": clbArn + \"/classic-loadbalancer-access-logging-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": clbArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks/AWS Security Best Practices\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[ELB.5] Classic load balancers should enable access logging\",\n \"Description\": \"Classic load balancer \"\n + clbName\n + \" does not have access logging enabled.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on access logging refer to the Access Logs for Your Classic Load Balancer section of the Classic Load Balancers User Guide.\",\n \"Url\": \"https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/access-log-collection.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Networking\",\n \"AssetService\": \"AWS Elastic Load Balancer\",\n \"AssetComponent\": \"Classic Load Balancer\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsElbLoadBalancer\",\n \"Id\": clbArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsElbLoadBalancer\": {\n \"DnsName\": dnsName,\n \"Scheme\": clbScheme,\n \"SecurityGroups\": lbSgs,\n \"Subnets\": lbSubnets,\n \"VpcId\": lbVpc,\n \"AvailabilityZones\": lbAzs,\n \"LoadBalancerName\": clbName\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 ID.AM-3\",\n \"NIST CSF V1.1 DE.AE-1\",\n \"NIST CSF V1.1 DE.AE-3\",\n \"NIST CSF V1.1 DE.CM-1\",\n \"NIST CSF V1.1 DE.CM-7\",\n \"NIST CSF V1.1 PR.PT-1\",\n \"NIST SP 800-53 Rev. 4 AC-2\",\n \"NIST SP 800-53 Rev. 4 AC-4\",\n \"NIST SP 800-53 Rev. 4 AU-6\",\n \"NIST SP 800-53 Rev. 4 AU-12\",\n \"NIST SP 800-53 Rev. 4 CA-3\",\n \"NIST SP 800-53 Rev. 4 CA-7\",\n \"NIST SP 800-53 Rev. 4 CA-9\",\n \"NIST SP 800-53 Rev. 4 CM-2\",\n \"NIST SP 800-53 Rev. 4 CM-3\",\n \"NIST SP 800-53 Rev. 4 CM-8\",\n \"NIST SP 800-53 Rev. 4 IR-4\",\n \"NIST SP 800-53 Rev. 4 IR-5\",\n \"NIST SP 800-53 Rev. 4 IR-8\",\n \"NIST SP 800-53 Rev. 4 PE-3\",\n \"NIST SP 800-53 Rev. 4 PE-6\",\n \"NIST SP 800-53 Rev. 4 PE-20\",\n \"NIST SP 800-53 Rev. 4 PL-8\",\n \"NIST SP 800-53 Rev. 4 SC-5\",\n \"NIST SP 800-53 Rev. 4 SC-7\",\n \"NIST SP 800-53 Rev. 
4 SI-4\",\n \"AICPA TSC CC3.2\",\n \"AICPA TSC CC6.1\",\n \"AICPA TSC CC7.2\",\n \"ISO 27001:2013 A.12.1.1\",\n \"ISO 27001:2013 A.12.1.2\",\n \"ISO 27001:2013 A.12.4.1\",\n \"ISO 27001:2013 A.12.4.2\",\n \"ISO 27001:2013 A.12.4.3\",\n \"ISO 27001:2013 A.12.4.4\",\n \"ISO 27001:2013 A.12.7.1\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.2.1\",\n \"ISO 27001:2013 A.13.2.2\",\n \"ISO 27001:2013 A.14.2.7\",\n \"ISO 27001:2013 A.15.2.1\",\n \"ISO 27001:2013 A.16.1.7\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding", "def log_access():\n # todo use project prefix\n tail('/var/log/nginx/access.log')", "def test_dump_collection(self):\n\n test_oplog, primary_conn, search_ts = self.get_oplog_thread()\n solr = DocManager()\n test_oplog.doc_manager = solr\n\n #with documents\n primary_conn['test']['test'].insert({'name': 'paulie'})\n search_ts = test_oplog.get_last_oplog_timestamp()\n test_oplog.dump_collection()\n\n test_oplog.doc_manager.commit()\n solr_results = solr._search()\n self.assertEqual(len(solr_results), 1)\n solr_doc = solr_results[0]\n self.assertEqual(long_to_bson_ts(solr_doc['_ts']), search_ts)\n self.assertEqual(solr_doc['name'], 'paulie')\n self.assertEqual(solr_doc['ns'], 'test.test')", "def save_to_mongo(self, collection='pending_trailers'):\n Database.insert(collection=collection, data=self.json())", "def test_post_add_log_event(self):\n pass", "def enable_audit_logging(f):\n\n @wraps(f)\n def wrapper(*args, **kwargs):\n def create_audit_log_for_request_decorator(response):\n return create_audit_log_for_request(response)\n\n if is_audit_enabled():\n # we can't add the `after_this_request` and\n # `create_audit_log_for_request_decorator` decorators to the\n # functions directly, because `is_audit_enabled` depends on\n # the config being loaded\n flask.after_this_request(create_audit_log_for_request_decorator)\n return f(*args, **kwargs)\n\n return wrapper", "def InsertLog():", "def load_landkreis_information():\n client = MongoClient(f'mongodb://{os.getenv(\"USR_\")}:{os.getenv(\"PWD_\")}@{os.getenv(\"REMOTE_HOST\")}:{os.getenv(\"REMOTE_PORT\")}/{os.getenv(\"AUTH_DB\")}')\n db = client[os.getenv(\"MAIN_DB\")]\n lk_collection = db[\"lk_overview\"]\n data = pd.DataFrame(list(lk_collection.find()))\n return data", "def test_last_used(self, monkeypatch):\n monkeypatch.setenv('ENABLE_AUTO_EXPIRE', 'true')\n monkeypatch.setenv('INACTIVITY_AGE', '10')\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('user3', 'kljin', 'Active', created, last_used)\n key.audit(10, 11, 2, 1)\n assert key.audit_state == 'expire'\n key.audit(60, 80, 2, 1)\n assert key.audit_state == 'stagnant_expire'", "def test_enable_local_caching(sdc_builder, sdc_executor, stage_attributes, cluster):\n\n dir_path = os.path.join(tempfile.gettempdir(), get_random_string(string.ascii_lowercase))\n table_name = get_random_string(string.ascii_lowercase, 10)\n\n builder = sdc_builder.get_pipeline_builder()\n directory = builder.add_stage('Directory', type='origin')\n directory.data_format = 'JSON'\n directory.json_content = 'MULTIPLE_OBJECTS'\n directory.files_directory = dir_path\n directory.file_name_pattern = '*.json'\n directory.file_name_pattern_mode = 'GLOB'\n\n kudu = builder.add_stage('Kudu Lookup')\n kudu.kudu_table_name = f'impala::default.{table_name}'\n kudu.key_columns_mapping = [dict(field='/f1', columnName='id')]\n 
kudu.column_to_output_field_mapping = [dict(field='/d1', columnName='name', defaultValue='no_name')]\n kudu.missing_lookup_behavior = 'PASS_RECORD_ON'\n kudu.enable_table_caching = True\n kudu.eviction_policy_type = 'EXPIRE_AFTER_WRITE'\n kudu.expiration_time = 1\n kudu.time_unit = 'HOURS'\n kudu.set_attributes(**stage_attributes)\n\n wiretap = builder.add_wiretap()\n\n directory >> kudu >> wiretap.destination\n\n pipeline = builder.build().configure_for_environment(cluster)\n\n sdc_executor.add_pipeline(pipeline)\n\n metadata = sqlalchemy.MetaData()\n table = sqlalchemy.Table(table_name,\n metadata,\n sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),\n sqlalchemy.Column('name', sqlalchemy.String),\n impala_partition_by='HASH PARTITIONS 16',\n impala_stored_as='KUDU',\n impala_table_properties={\n 'kudu.master_addresses': f'{cluster.server_host}:{DEFAULT_KUDU_PORT}',\n 'kudu.num_tablet_replicas': '1'\n })\n\n engine = cluster.kudu.engine\n table.create(engine)\n\n try:\n sdc_executor.execute_shell(f'mkdir -p {dir_path}')\n sdc_executor.write_file(os.path.join(dir_path, 'a.json'), json.dumps({\"f1\": 1, \"d1\": \"old_name1\"}))\n\n conn = engine.connect()\n conn.execute(table.insert(), [{'id': 1, 'name': 'name1'}])\n\n status = sdc_executor.start_pipeline(pipeline)\n status.wait_for_pipeline_batch_count(2)\n\n conn.execute(table.update().where(table.c.id == 1).values(name='name2'))\n\n sdc_executor.write_file(os.path.join(dir_path, 'b.json'), json.dumps({\"f1\": 1, \"d1\": \"old_name2\"}))\n\n status.wait_for_pipeline_batch_count(4)\n\n output_records = [record.field for record in wiretap.output_records]\n\n if stage_attributes['enable_local_caching']:\n assert [{'f1': 1, 'd1': 'name1'}, {'f1': 1, 'd1': 'name1'}] == output_records\n else:\n assert [{'f1': 1, 'd1': 'name1'}, {'f1': 1, 'd1': 'name2'}] == output_records\n\n finally:\n try:\n sdc_executor.stop_pipeline(pipeline)\n finally:\n table.drop(engine)\n sdc_executor.execute_shell(f'rm -fr {dir_path}')", "def GetLogs(self):\n raise NotImplementedError()", "def inspect_storage_objects_for_debugging(k8s_ctx: str, dry_run: bool = False):\n cmd = f'kubectl --context={k8s_ctx} get pv,pvc -o=NAME'\n for storage_obj in run_commands([cmd], dry_run):\n cmd = f'kubectl --context={k8s_ctx} describe {storage_obj}'\n if dry_run:\n logging.debug(cmd)\n else:\n p = safe_exec(cmd)\n if p.stdout:\n for line in p.stdout.decode().split('\\n'):\n if line.startswith(\"Status\") or line.startswith(\"Finalizers\"):\n logging.debug(f'{storage_obj} {line}')", "def logtool(self, action, **options):\n pass", "def log_model_metadata(model_uid, schema, db_conn):\n df = pd.DataFrame({\n 'training_timestamp': [get_current_timestamp()],\n 'model_uid': [model_uid]\n })\n df.to_sql(name='model_metadata', schema=schema, con=db_conn, if_exists='append', index=False)", "def update_logs(event, log, action_log, error_log):\n\tif event[\"type\"] == \"error\":\n\t\t#Update the error log file\n\telse:\n\t\t# event[\"type\"] == \"action\"\n\t\t#Update action file", "def ocsaudit_rest_log_command(method, url, url_args, username):\n \n try:\n if method == \"GET\":\n type = cmd_type.type_get\n elif method == \"POST\":\n type = cmd_type.type_post\n elif method == \"PATCH\":\n type = cmd_type.type_patch\n elif method == \"DELETE\":\n type = cmd_type.type_delete\n else:\n type = cmd_type.type_unknown\n print \"Unidentified command type {0}\".format(method)\n \n url = url.split(\"/v1/\",1)[1]\n args = \" \".join(url_args)\n \n ocsaudit_log_command(username, type, 
cmd_interface.interface_rest, \n url, args)\n except Exception as e:\n print \"ocsaudit_rest_log_command Exception {0}\".format(e)", "def describe_mongo_dblog_config(\n self,\n request: dds_20151201_models.DescribeMongoDBLogConfigRequest,\n ) -> dds_20151201_models.DescribeMongoDBLogConfigResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_mongo_dblog_config_with_options(request, runtime)", "def audit(cls):\n old_save = cls.save\n old_delete = cls.delete\n def save(self, *arg, **kw):\n from middleware import get_current_user\n user = get_current_user()\n if user is not None:\n self.last_user_id = user.id\n return old_save(self, *arg, **kw)\n\n\n def delete(self, *arg, **kw):\n from middleware import get_current_user\n user = get_current_user()\n if user is not None:\n self.last_user_id = user.id\n cls.save(self)\n return old_delete(self, *arg, **kw)\n cls.save = save\n cls.delete = delete\n cls.last_user_id = models.IntegerField(null=True, blank=True, editable=False)\n return cls", "def logentry(jobid, label, typ, content=None, path=None):\n ud = str(uuid.uuid4())\n db.logs.save({\"uuid\":ud, \"jobid\":jobid, \"label\":label, \"type\":typ, \"content\":content, \"date\":tstamp(), \"containerurl\":path})", "def modify_audit_log_filter(\n self,\n request: dds_20151201_models.ModifyAuditLogFilterRequest,\n ) -> dds_20151201_models.ModifyAuditLogFilterResponse:\n runtime = util_models.RuntimeOptions()\n return self.modify_audit_log_filter_with_options(request, runtime)" ]
[ "0.5898722", "0.5786134", "0.5480591", "0.53293675", "0.50717187", "0.50444025", "0.50151235", "0.49692455", "0.4954362", "0.49429396", "0.49257115", "0.49257115", "0.49040037", "0.48921037", "0.48706865", "0.48651257", "0.48216146", "0.4819296", "0.48169842", "0.48097873", "0.47738534", "0.47672382", "0.47203305", "0.46936786", "0.4683501", "0.46613267", "0.46518794", "0.46449718", "0.46291402", "0.45994654", "0.45853475", "0.45850286", "0.45635897", "0.4550033", "0.4536772", "0.45301455", "0.4520394", "0.4515847", "0.45138472", "0.4509694", "0.4494876", "0.44909266", "0.44781277", "0.44584098", "0.4458076", "0.4450262", "0.44424683", "0.4440708", "0.44245133", "0.44218856", "0.44123235", "0.44018343", "0.4395626", "0.43854618", "0.43805957", "0.43782344", "0.43707868", "0.43700668", "0.4368556", "0.43658316", "0.43616137", "0.43499753", "0.43482494", "0.4340306", "0.4338018", "0.43373325", "0.43354473", "0.43292272", "0.43271402", "0.43225357", "0.4316143", "0.4315809", "0.43153846", "0.4310897", "0.43075767", "0.43055347", "0.43022865", "0.43011022", "0.42999592", "0.4298701", "0.4295148", "0.42843503", "0.42776352", "0.4276151", "0.42731354", "0.42685813", "0.4265743", "0.42654696", "0.4261886", "0.42608103", "0.42605546", "0.42594656", "0.42578465", "0.4256306", "0.4255751", "0.42548603", "0.4254041", "0.4253368", "0.42507833", "0.42478174", "0.42357117" ]
0.0
-1
> To query available regions and zones where ApsaraDB for MongoDB instances can be created, call the [DescribeAvailableResource](~~149719~~) operation.
def describe_regions_with_options( self, request: dds_20151201_models.DescribeRegionsRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.DescribeRegionsResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.accept_language): query['AcceptLanguage'] = request.accept_language if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.region_id): query['RegionId'] = request.region_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='DescribeRegions', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.DescribeRegionsResponse(), self.call_api(params, req, runtime) )
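A minimal usage sketch for the operation above (illustrative only: the client class, module paths, endpoint, and credentials below are assumptions based on the common Alibaba Cloud Python SDK v2 layout and are not part of this dataset; only DescribeRegionsRequest, RuntimeOptions, and the describe_regions_with_options signature come from the code shown above):

# Assumed imports and client class name; adjust to the actual SDK package in use.
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util import models as util_models
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_dds20151201.client import Client  # assumed client class

config = open_api_models.Config(
    access_key_id='<access-key-id>',          # placeholder credentials
    access_key_secret='<access-key-secret>',
    endpoint='mongodb.aliyuncs.com',          # assumed service endpoint
)
client = Client(config)

# Build the request and runtime objects that describe_regions_with_options validates.
request = dds_20151201_models.DescribeRegionsRequest(accept_language='en-US')
runtime = util_models.RuntimeOptions()
response = client.describe_regions_with_options(request, runtime)
print(response.body)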
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_available_resource(self, nodename):\n LOG.debug(\"get_available_resource\")\n\n dictval = self._host.properties\n\n return dictval", "def get_available_resource(self, nodename):\n if nodename not in self._drv_nodes:\n return {}\n supported_tuple = ('IA64', 'kvm', 'hvm')\n return {\n 'vcpus': drv_conf.max_vcpus,\n 'memory_mb': drv_conf.max_memory_mb,\n 'local_gb': drv_conf.max_disk_gb,\n 'vcpus_used': 0,\n 'memory_mb_used': 0,\n 'local_gb_used': 0,\n 'hypervisor_type': self.name,\n 'hypervisor_version': '1',\n 'hypervisor_hostname': nodename,\n 'disk_available_least': 0,\n 'cpu_info': '?',\n 'numa_topology': None,\n 'supported_instances': [supported_tuple]\n }", "def show_available_products():\n\n mongo = MongoDBConnection()\n result = {}\n\n with mongo:\n db = mongo.connection.HPNorton\n productcollection = db[\"products\"]\n for document in productcollection.find({\"quantity_available\": {\"$gt\": \"0\"}}):\n key = document['product_id']\n\n result[key] = {\n 'description': document['description'],\n 'product_type': document['product_type'],\n 'quantity_available': document['quantity_available']\n }\n\n return result", "def show_available_products():\n available_product = {}\n\n if not collection_exist(DATABASE, PRODUCT_COLLECTION):\n return available_product\n\n with MongoDBConnection() as mongo:\n database = mongo.connection[DATABASE]\n\n available_product__count = 0\n for product in database[PRODUCT_COLLECTION].find({\"quantity_available\": {\"$ne\": '0'}}):\n available_product[product['product_id']] = \\\n {'description': product['description'],\n 'product_type': product['product_type'],\n 'quantity_available': product['quantity_available']}\n available_product__count += 1\n\n return available_product__count", "def get_available_resource(self, nodename):\n curent_time = time.time()\n if curent_time - self.cleanup_time > CONF.azure.cleanup_span:\n self.cleanup_time = curent_time\n self._cleanup_deleted_os_disks()\n self._cleanup_deleted_nics()\n usage_family = 'basicAFamily'\n try:\n page = self.compute.usage.list(CONF.azure.location)\n except Exception as e:\n msg = six.text_type(e)\n LOG.exception(msg)\n ex = exception.ComputeUsageListFailure(reason=six.text_type(e))\n raise ex\n usages = [i for i in page]\n cores = 0\n cores_used = 0\n for i in usages:\n if hasattr(i, 'name') and hasattr(i.name, 'value'):\n if usage_family == i.name.value:\n cores = i.limit if hasattr(i, 'limit') else 0\n cores_used = i.current_value \\\n if hasattr(i, 'current_value') else 0\n break\n return {'vcpus': cores,\n 'memory_mb': 100000000,\n 'local_gb': 100000000,\n 'vcpus_used': cores_used,\n 'memory_mb_used': 0,\n 'local_gb_used': 0,\n 'hypervisor_type': hv_type.HYPERV,\n 'hypervisor_version': 300,\n 'hypervisor_hostname': nodename,\n 'cpu_info': '{\"model\": [\"Intel(R) Xeon(R) CPU E5-2670 0 @ '\n '2.60GHz\"], \"topology\": {\"cores\": 16, \"threads\": '\n '32}}',\n 'supported_instances': [(arch.I686, hv_type.HYPERV,\n vm_mode.HVM),\n (arch.X86_64, hv_type.HYPERV,\n vm_mode.HVM)],\n 'numa_topology': None\n }", "def show_available_products():\n LOGGER.debug('Listing all available products.')\n available_products = {}\n with MongoDBConnection() as mongo:\n database = mongo.connection.hp_norton\n for product in database.products.find(\n {'quantity_available': {'$gt': 0}}):\n available_products[product['product_id']] = {\n 'description': product['description'],\n 'product_type': product['product_type'],\n 'quantity_available': product['quantity_available']}\n return available_products", "def 
get_available_dbms(connection, error_msg=None):\n url = f\"{connection.base_url}/api/dbobjects/dbmss\"\n response = connection.session.get(url=url)\n if not response.ok:\n if error_msg is None:\n error_msg = \"Error getting available DBMSs\"\n response_handler(response, error_msg)\n return response", "def check_available():\n\n rm = current_app.config['rm_object']\n\n return rm.check_availability()", "def get_available_databases():\n\n available_databases = dict()\n all_databases = resource_keys('database', strip=[])\n for database in all_databases:\n try:\n database_entry_point = load_resource(database, 'database')\n\n available_databases[database] = dict()\n\n # Checking if the database has data for the ZT normalization\n available_databases[database][\"has_zt\"] = hasattr(database_entry_point, \"zobjects\") and hasattr(database_entry_point, \"tobjects\")\n available_databases[database][\"groups\"] = []\n # Searching for database groups\n try:\n groups = list(database_entry_point.groups()) or [\"dev\"]\n for g in [\"dev\", \"eval\"]:\n available_databases[database][\"groups\"] += [g] if g in groups else []\n except Exception:\n # In case the method groups is not implemented\n available_databases[database][\"groups\"] = [\"dev\"]\n except Exception:\n pass\n return available_databases", "def show_available_products(): # {{{\n products_available = {}\n try:\n with MONGO:\n product_collection = MONGO.connection.assignment_07[\"product\"].find(\n )\n\n for product in product_collection:\n if int(product[\"quantity_available\"]) > 0:\n products_available[product[\"product_id\"]] = {\n \"description\": product[\"description\"],\n \"product_type\": product[\"product_type\"],\n \"quantity_available\": product[\"quantity_available\"],\n }\n except TypeError as excep:\n LOGGER.warning(\"Error looking up available products\")\n LOGGER.warning(excep)\n else:\n if not products_available:\n LOGGER.info('No products found')\n else:\n LOGGER.info(\"Available products retrieved successfully.\")\n return products_available # }}}", "def compute_zones(self):\n path = '/os-availability-zone/detail'\n res = self.compute.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('Get openstack availability zone: %s' % truncate(res))\n return res[0]['availabilityZoneInfo']", "def show_available_products(*args):\n logger.info(f\"Preparing dict of available prodcuts...\")\n available_products = {}\n\n with MONGO:\n mdb = eval(Settings.connect_string)\n products = mdb[\"product\"]\n for doc in products.find():\n del doc[\"_id\"]\n if int(doc[\"quantity_available\"]) > 0:\n product_id = doc[\"product_id\"]\n del doc[\"product_id\"]\n available_products[product_id] = doc\n\n return available_products", "def _get_info_about_available_resources(self, min_ram, min_hdd, min_vcpus):\n vms_count = 0\n for hypervisor in self.nova_cli.hypervisors.list():\n if hypervisor.free_ram_mb >= min_ram:\n if hypervisor.free_disk_gb >= min_hdd:\n if hypervisor.vcpus - hypervisor.vcpus_used >= min_vcpus:\n # We need to determine how many VMs we can run\n # on this hypervisor\n free_cpu = hypervisor.vcpus - hypervisor.vcpus_used\n k1 = int(hypervisor.free_ram_mb / min_ram)\n k2 = int(hypervisor.free_disk_gb / min_hdd)\n k3 = int(free_cpu / min_vcpus)\n vms_count += min(k1, k2, k3)\n return vms_count", "def reserved_compare(options):\n running_instances = defaultdict(dict)\n reserved_purchases = defaultdict(dict)\n regions = boto.ec2.regions()\n good_regions = [r for r in regions if r.name not in 
['us-gov-west-1',\n 'cn-north-1']]\n for region in good_regions:\n if options.trace:\n print \" Scanning region {0}\".format(region.name)\n conn = region.connect()\n filters = {'instance-state-name': 'running'}\n zones = defaultdict(dict)\n\n if options.trace:\n print \" Fetching running instances\"\n reservations = conn.get_all_instances(filters=filters)\n for reservation in reservations:\n for inst in reservation.instances:\n if options.debug:\n print instance_string(inst, options, verbose=True)\n if inst.state != 'running':\n if options.debug:\n print \"Skip {0.id} state {0.state}\".format(inst)\n continue\n if inst.spot_instance_request_id:\n if options.debug:\n print \"Skip {0.id} has spot id {0.spot_instance_request_id}\".format(inst)\n continue\n if 'aws:autoscaling:groupName' in inst.tags:\n if options.debug:\n print \"Skip {0.id} is an autoscale instance\".format(inst)\n continue\n if inst.platform == 'Windows' or inst.platform == 'windows':\n if options.debug:\n print \"Skip {0.id} has platform {0.platform}\".format(inst)\n continue\n if inst.instance_type not in zones[inst.placement]:\n zones[inst.placement][inst.instance_type] = []\n zones[inst.placement][inst.instance_type].append(inst)\n\n if zones:\n running_instances[region.name] = zones\n\n purchased = defaultdict(dict)\n if options.trace:\n print \" Fetching reservations\"\n\n reserved = conn.get_all_reserved_instances()\n for r in reserved:\n if options.debug:\n print reservation_string(r, verbose=True)\n if r.state != 'active':\n continue\n if r.instance_tenancy != 'default':\n print 'WARNING: Non-default tenancy %s: %s' % (r.instance_tenancy, reservation_string(r))\n continue\n if r.instance_type not in purchased[r.availability_zone]:\n purchased[r.availability_zone][r.instance_type] = [r]\n else:\n purchased[r.availability_zone][r.instance_type].append(r)\n\n if purchased:\n reserved_purchases[region.name] = purchased\n\n return check_reservation_use(options, running_instances,\n reserved_purchases)", "def describe_availability_options(DomainName=None, Deployed=None):\n pass", "def show_available_products():\n products = DATABASE['product'].find({'quantity_available': {'$ne':'0'}})\n products_dict = {prod['product_id']:\n {'description': prod['description'],\n 'product_type': prod['product_type'],\n 'quantity_available': int(prod['quantity_available'])}\n for prod in products}\n return products_dict", "def get_available_db_drivers(connection, error_msg=None):\n url = f\"{connection.base_url}/api/dbobjects/drivers\"\n response = connection.session.get(url=url)\n if not response.ok:\n if error_msg is None:\n error_msg = \"Error getting available database drivers\"\n response_handler(response, error_msg)\n return response", "def try_atlas():\n result = {}\n try:\n vcap_services = os.getenv('VCAP_SERVICES')\n services = json.loads(vcap_services)\n for service_name in services.keys():\n print(f'service_name={service_name}')\n if service_name == \"_\":\n continue\n credentials = load_from_vcap_services(service_name)\n result.update(credentials)\n except Exception as err:\n print( f'Error looking for VCAP_SERVICES {err}')\n result['error']=err\n return result\n\n mongo_results = {}\n try:\n db = MongoClient( result['connectionString'] )\n mongo_results[\"MongoClient\"]= f'{db}'\n mongo_results[\"server_info\"]=db.server_info()\n except Exception as err:\n print( f'Error trying connection to Atlas: {err}')\n result['atlas-error']=err\n finally:\n result['mongo']=mongo_results\n\n return result", "def is_available(**kwargs: 
Any) -> bool:\n try:\n _check_available()\n except Unavailable as e:\n logger.info('Database not available: %s', e)\n return False\n return True", "def avail_locations(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"The avail_images function must be called with \"\n \"-f or --function, or with the --list-locations option\"\n )\n\n ret = {}\n conn = get_conn()\n\n for item in conn.list_locations()[\"items\"]:\n reg, loc = item[\"id\"].split(\"/\")\n location = {\"id\": item[\"id\"]}\n\n if reg not in ret:\n ret[reg] = {}\n\n ret[reg][loc] = location\n return ret", "def load_landkreis_information():\n client = MongoClient(f'mongodb://{os.getenv(\"USR_\")}:{os.getenv(\"PWD_\")}@{os.getenv(\"REMOTE_HOST\")}:{os.getenv(\"REMOTE_PORT\")}/{os.getenv(\"AUTH_DB\")}')\n db = client[os.getenv(\"MAIN_DB\")]\n lk_collection = db[\"lk_overview\"]\n data = pd.DataFrame(list(lk_collection.find()))\n return data", "def _check_available() -> None:\n current_session().query(\"1\").from_statement(text(\"SELECT 1\")).all()", "def avail_locations(session=None, call=None):\n # TODO: need to figure out a good meaning of locations in Xen\n if call == \"action\":\n raise SaltCloudException(\n \"The avail_locations function must be called with -f or --function.\"\n )\n return pool_list()", "def get_resource_available_in_dt_range(candidate_resources, dt_range,\n new_resource_occupations):\n for resource in candidate_resources:\n\n # Only occupations of current resource\n res_new_occupations = [y[1] for y in filter(\n lambda x: x[0] == clean_resource(resource),\n new_resource_occupations)]\n\n # Check availability\n availability = resource.get('availability')\n if (availability and not is_datetime_range_available(dt_range,\n availability)):\n continue\n\n # Check occupations\n occupations = resource.get('occupations', []) + res_new_occupations\n overlappings = [overlaps(dt_range, o) for o in occupations]\n if any(overlappings):\n continue\n\n return resource\n\n return None", "def test_aws_service_api_availability_zones_get(self):\n pass", "def oci_mysql_dbsystem_high_availability_check(cache, awsAccountId, awsRegion, awsPartition, ociTenancyId, ociUserId, ociRegionName, ociCompartments, ociUserApiKeyFingerprint):\n # ISO Time\n iso8601Time = datetime.datetime.now(datetime.timezone.utc).isoformat()\n for mysqldbs in get_mysql_db_systems(cache, ociTenancyId, ociUserId, ociRegionName, ociCompartments, ociUserApiKeyFingerprint):\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(mysqldbs,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n compartmentId = mysqldbs[\"compartment_id\"]\n mysqldbsId = mysqldbs[\"id\"]\n mysqldbsName = mysqldbs[\"display_name\"]\n lbLifecycleState = mysqldbs[\"lifecycle_state\"]\n createdAt = str(mysqldbs[\"time_created\"])\n\n if mysqldbs[\"is_highly_available\"] is False:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{mysqldbsId}/oci-mysql-dbs-high-availability-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{mysqldbsId}/oci-mysql-dbs-high-availability-check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"LOW\"},\n \"Confidence\": 99,\n \"Title\": 
\"[OCI.MySQLDatabaseService.7] MySQL Database Systems should be configured to be highly available\",\n \"Description\": f\"Oracle MySQL Database System {mysqldbsName} in Compartment {compartmentId} in {ociRegionName} is not highly available. A high availability DB system is made up of three MySQL instances: a primary instance and two secondary instances. Each MySQL instance utilizes the same amount of block volume storage, number of OCPUs, and amount of RAM defined in the shape chosen. The primary instance functions as a read/write endpoint and you have read/write access to the primary instance only. All data that you write to the primary instance is copied to the secondary instances asynchronously. The secondary instances are placed in different availability or fault domains. High availablility DB systems consume more resources (OCPUs, RAM, network bandwidth) than standalone DB systems. Hence the throughput and latency differ from the standalone DB systems. High availability uses MySQL Group Replication to replicate data from the primary instance to the secondary instances. The replication occurs over a secure, managed, internal network, unconnected to the VCN subnet you configured for the DB system. Limited information about this internal network is available in some Performance Schema tables, and you can neither connect to it nor view any other information related to it. Refer to the remediation instructions if this configuration is not intended.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on High Availability refer to the Overview of High Availability section of the Oracle Cloud Infrastructure Documentation for MySQL Database.\",\n \"Url\": \"https://docs.oracle.com/en-us/iaas/mysql-database/doc/overview-high-availability.html#GUID-0387FC6B-73DF-4447-A206-3CBA2EB0FFB3\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"OCI\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": ociTenancyId,\n \"AssetRegion\": ociRegionName,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Database\",\n \"AssetService\": \"Oracle MySQL Database Service\",\n \"AssetComponent\": \"Database System\"\n },\n \"Resources\": [\n {\n \"Type\": \"OciMySqlDatabaseServiceDatabaseSystem\",\n \"Id\": mysqldbsId,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"TenancyId\": ociTenancyId,\n \"CompartmentId\": compartmentId,\n \"Region\": ociRegionName,\n \"Name\": mysqldbsName,\n \"Id\": mysqldbsId,\n \"CreatedAt\": createdAt,\n \"LifecycleState\": lbLifecycleState\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 ID.BE-5\",\n \"NIST CSF V1.1 PR.DS-4\",\n \"NIST CSF V1.1 PR.PT-5\",\n \"NIST SP 800-53 Rev. 4 AU-4\",\n \"NIST SP 800-53 Rev. 4 CP-2\",\n \"NIST SP 800-53 Rev. 4 CP-7\",\n \"NIST SP 800-53 Rev. 4 CP-8\",\n \"NIST SP 800-53 Rev. 4 CP-11\",\n \"NIST SP 800-53 Rev. 4 CP-13\",\n \"NIST SP 800-53 Rev. 4 PL-8\",\n \"NIST SP 800-53 Rev. 4 SA-14\",\n \"NIST SP 800-53 Rev. 4 SC-5\",\n \"NIST SP 800-53 Rev. 
4 SC-6\",\n \"AICPA TSC CC3.1\",\n \"AICPA TSC A1.1\",\n \"AICPA TSC A1.2\",\n \"ISO 27001:2013 A.11.1.4\",\n \"ISO 27001:2013 A.12.3.1\",\n \"ISO 27001:2013 A.17.1.1\",\n \"ISO 27001:2013 A.17.1.2\",\n \"ISO 27001:2013 A.17.2.1\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{mysqldbsId}/oci-mysql-dbs-high-availability-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{mysqldbsId}/oci-mysql-dbs-high-availability-check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[OCI.MySQLDatabaseService.7] MySQL Database Systems should be configured to be highly available\",\n \"Description\": f\"Oracle MySQL Database System {mysqldbsName} in Compartment {compartmentId} in {ociRegionName} is highly available.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on Deletion Planning for Database Systems refer to the Advanced Option: Deletion Plan section of the Oracle Cloud Infrastructure Documentation for MySQL Database.\",\n \"Url\": \"https://docs.oracle.com/en-us/iaas/mysql-database/doc/advanced-options.html#MYAAS-GUID-29A995D2-1D40-4AE8-A654-FB6F40B07D85\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"OCI\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": ociTenancyId,\n \"AssetRegion\": ociRegionName,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Database\",\n \"AssetService\": \"Oracle MySQL Database Service\",\n \"AssetComponent\": \"Database System\"\n },\n \"Resources\": [\n {\n \"Type\": \"OciMySqlDatabaseServiceDatabaseSystem\",\n \"Id\": mysqldbsId,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"TenancyId\": ociTenancyId,\n \"CompartmentId\": compartmentId,\n \"Region\": ociRegionName,\n \"Name\": mysqldbsName,\n \"Id\": mysqldbsId,\n \"CreatedAt\": createdAt,\n \"LifecycleState\": lbLifecycleState\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 ID.BE-5\",\n \"NIST CSF V1.1 PR.DS-4\",\n \"NIST CSF V1.1 PR.PT-5\",\n \"NIST SP 800-53 Rev. 4 AU-4\",\n \"NIST SP 800-53 Rev. 4 CP-2\",\n \"NIST SP 800-53 Rev. 4 CP-7\",\n \"NIST SP 800-53 Rev. 4 CP-8\",\n \"NIST SP 800-53 Rev. 4 CP-11\",\n \"NIST SP 800-53 Rev. 4 CP-13\",\n \"NIST SP 800-53 Rev. 4 PL-8\",\n \"NIST SP 800-53 Rev. 4 SA-14\",\n \"NIST SP 800-53 Rev. 4 SC-5\",\n \"NIST SP 800-53 Rev. 
4 SC-6\",\n \"AICPA TSC CC3.1\",\n \"AICPA TSC A1.1\",\n \"AICPA TSC A1.2\",\n \"ISO 27001:2013 A.11.1.4\",\n \"ISO 27001:2013 A.12.3.1\",\n \"ISO 27001:2013 A.17.1.1\",\n \"ISO 27001:2013 A.17.1.2\",\n \"ISO 27001:2013 A.17.2.1\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding", "def test_get_cloud_resources(self):\n pass", "def update_available_resource(self, ctxt, host):\n LOG.debug(\"update_available_resource\")\n return", "def show_dbs(*dbs):\n if dbs:\n log.debug(\"get dbs from pillar: %s\", dbs)\n result = {}\n for db in dbs:\n result[db] = __salt__[\"pillar.get\"](\"oracle:dbs:\" + db)\n return result\n else:\n pillar_dbs = __salt__[\"pillar.get\"](\"oracle:dbs\")\n log.debug(\"get all (%s) dbs from pillar\", len(pillar_dbs))\n return pillar_dbs", "def is_available():", "def db_lookup(client):\n dblist_dict= client.get_list_database()\n # print(\"def db_lookup 010:\", dblist_dict)\n # print(\"def db_lookup 020:\", dblist_dict[3]['name'])\n # for element in dblist_dict:\n # print(\"db_lookup 3:\", element['name'])\n return dblist_dict", "def _update_available_resources(self, context):\n\n all_nodes = self.driver.get_available_nodes()\n all_rps = self.scheduler_client.reportclient\\\n .get_filtered_resource_providers({})\n node_uuids = [node.uuid for node in all_nodes]\n\n # Clean orphan resource providers in placement\n for rp in all_rps:\n if rp['uuid'] not in node_uuids:\n server_by_node = objects.Server.list(\n context, filters={'node_uuid': rp['uuid']})\n if server_by_node:\n continue\n self.scheduler_client.reportclient.delete_resource_provider(\n rp['uuid'])\n\n for node in all_nodes:\n if self.driver.is_node_consumable(node):\n self.scheduler_client.reportclient \\\n .delete_allocations_for_resource_provider(node.uuid)\n resource_class = sched_utils.ensure_resource_class_name(\n node.resource_class)\n inventory = self.driver.get_node_inventory(node)\n inventory_data = {resource_class: inventory}\n self.scheduler_client.set_inventory_for_provider(\n node.uuid, node.name or node.uuid, inventory_data,\n resource_class)", "def test_get_virtualization_realm_resources(self):\n pass", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n availability_zone: Optional[pulumi.Input[str]] = None,\n connection_string: Optional[pulumi.Input[str]] = None,\n create_sample_data: Optional[pulumi.Input[bool]] = None,\n db_instance_category: Optional[pulumi.Input[str]] = None,\n db_instance_class: Optional[pulumi.Input[str]] = None,\n db_instance_mode: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n encryption_key: Optional[pulumi.Input[str]] = None,\n encryption_type: Optional[pulumi.Input[str]] = None,\n engine: Optional[pulumi.Input[str]] = None,\n engine_version: Optional[pulumi.Input[str]] = None,\n instance_charge_type: Optional[pulumi.Input[str]] = None,\n instance_group_count: Optional[pulumi.Input[int]] = None,\n instance_network_type: Optional[pulumi.Input[str]] = None,\n instance_spec: Optional[pulumi.Input[str]] = None,\n ip_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InstanceIpWhitelistArgs']]]]] = None,\n maintain_end_time: Optional[pulumi.Input[str]] = None,\n maintain_start_time: Optional[pulumi.Input[str]] = None,\n master_node_num: Optional[pulumi.Input[int]] = None,\n payment_type: Optional[pulumi.Input[str]] = None,\n period: Optional[pulumi.Input[str]] = None,\n port: Optional[pulumi.Input[str]] = 
None,\n private_ip_address: Optional[pulumi.Input[str]] = None,\n resource_group_id: Optional[pulumi.Input[str]] = None,\n security_ip_lists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n seg_node_num: Optional[pulumi.Input[int]] = None,\n seg_storage_type: Optional[pulumi.Input[str]] = None,\n ssl_enabled: Optional[pulumi.Input[int]] = None,\n status: Optional[pulumi.Input[str]] = None,\n storage_size: Optional[pulumi.Input[int]] = None,\n tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n used_time: Optional[pulumi.Input[str]] = None,\n vector_configuration_status: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n vswitch_id: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None) -> 'Instance':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InstanceState.__new__(_InstanceState)\n\n __props__.__dict__[\"availability_zone\"] = availability_zone\n __props__.__dict__[\"connection_string\"] = connection_string\n __props__.__dict__[\"create_sample_data\"] = create_sample_data\n __props__.__dict__[\"db_instance_category\"] = db_instance_category\n __props__.__dict__[\"db_instance_class\"] = db_instance_class\n __props__.__dict__[\"db_instance_mode\"] = db_instance_mode\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"encryption_key\"] = encryption_key\n __props__.__dict__[\"encryption_type\"] = encryption_type\n __props__.__dict__[\"engine\"] = engine\n __props__.__dict__[\"engine_version\"] = engine_version\n __props__.__dict__[\"instance_charge_type\"] = instance_charge_type\n __props__.__dict__[\"instance_group_count\"] = instance_group_count\n __props__.__dict__[\"instance_network_type\"] = instance_network_type\n __props__.__dict__[\"instance_spec\"] = instance_spec\n __props__.__dict__[\"ip_whitelists\"] = ip_whitelists\n __props__.__dict__[\"maintain_end_time\"] = maintain_end_time\n __props__.__dict__[\"maintain_start_time\"] = maintain_start_time\n __props__.__dict__[\"master_node_num\"] = master_node_num\n __props__.__dict__[\"payment_type\"] = payment_type\n __props__.__dict__[\"period\"] = period\n __props__.__dict__[\"port\"] = port\n __props__.__dict__[\"private_ip_address\"] = private_ip_address\n __props__.__dict__[\"resource_group_id\"] = resource_group_id\n __props__.__dict__[\"security_ip_lists\"] = security_ip_lists\n __props__.__dict__[\"seg_node_num\"] = seg_node_num\n __props__.__dict__[\"seg_storage_type\"] = seg_storage_type\n __props__.__dict__[\"ssl_enabled\"] = ssl_enabled\n __props__.__dict__[\"status\"] = status\n __props__.__dict__[\"storage_size\"] = storage_size\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"used_time\"] = used_time\n __props__.__dict__[\"vector_configuration_status\"] = vector_configuration_status\n __props__.__dict__[\"vpc_id\"] = vpc_id\n __props__.__dict__[\"vswitch_id\"] = vswitch_id\n __props__.__dict__[\"zone_id\"] = zone_id\n return Instance(resource_name, opts=opts, __props__=__props__)", "def _get_available_regions():\n session = boto3.session.Session()\n\n return session.get_available_regions(service_name='s3')", "def ex_list_availability_zones(self, only_available=True):\n params = {'Action': 'DescribeAvailabilityZones'}\n\n if only_available:\n params.update({'Filter.0.Name': 'state'})\n params.update({'Filter.0.Value.0': 'available'})\n\n params.update({'Filter.1.Name': 'region-name'})\n params.update({'Filter.1.Value.0': self.region_name})\n\n result = 
self.connection.request(self.path,\n params=params.copy()).object\n\n availability_zones = []\n for element in self._findall(result, 'availabilityZoneInfo/item'):\n name = self._findtext(element, 'zoneName')\n zone_state = self._findtext(element, 'zoneState')\n region_name = self._findtext(element, 'regionName')\n\n availability_zone = ExEC2AvailabilityZone(\n name=name,\n zone_state=zone_state,\n region_name=region_name\n )\n availability_zones.append(availability_zone)\n\n return availability_zones", "def show_resource_pool(client, private_cloud, resource_pool, location):\n return client.get(location, private_cloud, resource_pool)", "def _get_available_region_options():\n available_regions = sorted(_get_available_regions())\n options = [ConfigurationOption(region, region) for region in available_regions]\n\n return options", "def get_availability_zones(self, context, filters=None, fields=None,\n sorts=None, limit=None, marker=None,\n page_reverse=False):", "def update_available_resource(self, ctxt, host):\n return", "def update_available_resource(self, context):\n # ask hypervisor for its view of resource availability &\n # usage:\n resources = self.driver.get_available_resource()\n if not resources:\n # The virt driver does not support this function\n LOG.warn(_(\"Virt driver does not support \"\n \"'get_available_resource' Compute tracking is disabled.\"))\n self.compute_node = None\n self.claims = {}\n return\n\n # Confirm resources dictionary contains expected keys:\n self._verify_resources(resources)\n\n resources['free_ram_mb'] = resources['memory_mb'] - \\\n resources['memory_mb_used']\n resources['free_disk_gb'] = resources['local_gb'] - \\\n resources['local_gb_used']\n\n LOG.audit(_(\"free_ram_mb: %s\") % resources['free_ram_mb'])\n LOG.audit(_(\"free_disk_gb: %s\") % resources['free_disk_gb'])\n # Apply resource claims representing in-progress operations to\n # 'resources'. This may over-estimate the amount of resources in use,\n # at least until the next time 'update_available_resource' runs.\n self._apply_claims(resources)\n\n # also generate all load stats:\n values = self._create_load_stats(context)\n resources.update(values)\n\n if not self.compute_node:\n # we need a copy of the ComputeNode record:\n service = self._get_service(context)\n if not service:\n # no service record, disable resource\n return\n\n compute_node_ref = service['compute_node']\n if compute_node_ref:\n self.compute_node = compute_node_ref[0]\n\n if not self.compute_node:\n # Need to create the ComputeNode record:\n resources['service_id'] = service['id']\n self.compute_node = self._create(context, resources)\n LOG.info(_('Compute_service record created for %s ') % self.host)\n\n else:\n # just update the record:\n self.compute_node = self._update(context, resources,\n prune_stats=True)\n LOG.info(_('Compute_service record updated for %s ') % self.host)", "def database_connectivity():\n\n port = \"mongodb://localhost:27017/\"\n client = pymongo.MongoClient(host=port)\n\n db = client[\"Password_Manager\"]\n collection = db[\"Passwords\"]\n\n return collection", "def get_available_databases():\n return map(\n lambda (key, value): (key, value[\"description\"]),\n DumpConverter.DATABASES.items())", "def databases(request): # pylint: disable=unused-argument\n response = InstancesAccess.get_all()\n # By default, JsonResponse refuse to serialize a list to a Json list. 
safe=False allow it.\n return JsonResponse(response, safe=False)", "def list_allocation_candidates(req):\n context = req.environ['placement.context']\n context.can(policies.LIST)\n want_version = req.environ[microversion.MICROVERSION_ENVIRON]\n get_schema = _get_schema(want_version)\n util.validate_query_params(req, get_schema)\n\n rqparams = lib.RequestWideParams.from_request(req)\n groups = lib.RequestGroup.dict_from_request(req, rqparams)\n\n if not rqparams.group_policy:\n # group_policy is required if more than one numbered request group was\n # specified.\n if len([rg for rg in groups.values() if rg.use_same_provider]) > 1:\n raise webob.exc.HTTPBadRequest(\n 'The \"group_policy\" parameter is required when specifying '\n 'more than one \"resources{N}\" parameter.')\n\n # We can't be aware of nested architecture with old microversions\n nested_aware = want_version.matches((1, 29))\n\n try:\n cands = ac_obj.AllocationCandidates.get_by_requests(\n context, groups, rqparams, nested_aware=nested_aware)\n except exception.ResourceClassNotFound as exc:\n raise webob.exc.HTTPBadRequest(\n 'Invalid resource class in resources parameter: %(error)s' %\n {'error': exc})\n except exception.TraitNotFound as exc:\n raise webob.exc.HTTPBadRequest(str(exc))\n\n response = req.response\n trx_cands = _transform_allocation_candidates(cands, groups, want_version)\n json_data = jsonutils.dumps(trx_cands)\n response.body = encodeutils.to_utf8(json_data)\n response.content_type = 'application/json'\n if want_version.matches((1, 15)):\n response.cache_control = 'no-cache'\n response.last_modified = timeutils.utcnow(with_timezone=True)\n return response", "def ip_allocations(self) -> Optional[Sequence['outputs.SubResourceResponse']]:\n return pulumi.get(self, \"ip_allocations\")", "def get_instances() -> dict:\n url = f\"{app.config['COMPUTE_SERVERS_REF']}/detail\"\n instances_rq = request(\n method=\"GET\", url=url, headers=build_header(), params={\"vm_state\": \"active\"},\n )\n\n if not instances_rq.ok:\n HTTPError(instances_rq.status_code)\n\n answer = {\"servers\": list()}\n for instance in instances_rq.json()[\"servers\"]:\n instance_info = dict(name=instance[\"name\"])\n instance_info[\"ip_addresses\"] = list()\n for network, info in instance[\"addresses\"].items():\n instance_info[\"ip_addresses\"].extend(entry[\"addr\"] for entry in info)\n answer[\"servers\"].append(instance_info)\n\n return answer", "def get_all_db_region(self, context):\n zone_objs = self.dns_manager.get_all_db_region(context)\n return zone_objs", "def check_available(rs):\n info = rs.get_cluster_info()\n if info['ClusterStatus'] == 'available':\n return True\n elif info['ClusterStatus'] == 'deleting':\n raise AttributeError(f'Cluster is currently deleting. Please wait and try again.\\n{info}')\n elif info['ClusterStatus'] == 'creating': \n return False\n \n raise NameError(f'Could not get cluster status information. Current information about the cluster: \\n{info}')", "def update_available_resource(self, context):\n new_resource_tracker_dict = {}\n\n compute_nodes_in_db = self._get_compute_nodes_in_db(context,\n use_slave=True)\n nodenames = set(self.driver.get_available_nodes())\n for nodename in nodenames:\n rt = self._get_resource_tracker(nodename)\n try:\n rt.update_available_resource(context)\n except exception.ComputeHostNotFound:\n # NOTE(comstud): We can get to this case if a node was\n # marked 'deleted' in the DB and then re-added with a\n # different auto-increment id. 
The cached resource\n # tracker tried to update a deleted record and failed.\n # Don't add this resource tracker to the new dict, so\n # that this will resolve itself on the next run.\n LOG.info(_LI(\"Compute node '%s' not found in \"\n \"update_available_resource.\"), nodename)\n continue\n except Exception:\n LOG.exception(_LE(\"Error updating resources for node \"\n \"%(node)s.\"), {'node': nodename})\n new_resource_tracker_dict[nodename] = rt\n\n # NOTE(comstud): Replace the RT cache before looping through\n # compute nodes to delete below, as we can end up doing greenthread\n # switches there. Best to have everyone using the newest cache\n # ASAP.\n self._resource_tracker_dict = new_resource_tracker_dict\n\n # Delete orphan compute node not reported by driver but still in db\n for cn in compute_nodes_in_db:\n if cn.hypervisor_hostname not in nodenames:\n LOG.info(_LI(\"Deleting orphan compute node %s\"), cn.id)\n cn.destroy()", "def avail(self, time, resource_group):\n a = set()\n for r in self.resource_group.resources:\n pass", "def list_rds(region, filter_by_kwargs):\n conn = boto.rds.connect_to_region(region)\n instances = conn.get_all_dbinstances()\n return lookup(instances, filter_by=filter_by_kwargs)", "def GetAvailabilityOfConnection(ConnectionInfo, StartDate, EndDate):\r\n\tVerkehrstageHex = ConnectionInfo[ConnInfoInd['trafficdays_hexcode']]\r\n\treturn GetAvailabilityBetweenDates(StartDate, EndDate, VerkehrstageHex)", "def list_resource_pool(client, private_cloud, location):\n return client.list(location, private_cloud)", "def api_get_regions():\n db_session = DBSession()\n\n rows = []\n criteria = '%'\n if request.args and request.args.get('q'):\n criteria += request.args.get('q') + '%'\n else:\n criteria += '%'\n\n regions = db_session.query(Region).filter(Region.name.like(criteria)).order_by(Region.name.asc()).all()\n if len(regions) > 0:\n if request.args.get('show_all'):\n rows.append({'id': 0, 'text': 'ALL'})\n for region in regions:\n rows.append({'id': region.id, 'text': region.name})\n\n return jsonify(**{'data': rows})", "def supports_catalog_lookup(self):\n # Implemented from kitosid template for -\n # osid.resource.ResourceProfile.supports_resource_lookup\n return self._provider_manager.supports_catalog_lookup()", "def test_list_cluster_resource_quota(self):\n pass", "def get_allocations(self):\n cursor = self.cur()\n cursor.execute('SELECT {col1}, {col2} FROM {tn}'.format(\n tn=\"allocation\", col1=\"room_name\", col2=\"person_id\"))\n allocations = cursor.fetchall()\n return allocations", "def set_resources():\n global available_resources\n global EdgenodeResources\n recv_json = request.get_json()\n for resourcename, value in recv_json.items():\n available_resources[resourcename] = value\n # TODO make this better\n EdgenodeResources = [TaskResources(ram=int(available_resources['RAM']), cpu=int(\n available_resources['CPU']), hdd=int(available_resources['HDD'])), available_resources['DATA']]\n\n print 'Available resources set to', EdgenodeResources\n return 'Available resources set to ' + str(available_resources)", "def avail_images(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"The avail_images function must be called with \"\n \"-f or --function, or with the --list-images option\"\n )\n\n ret = {}\n conn = get_conn()\n\n for item in conn.list_images()[\"items\"]:\n image = {\"id\": item[\"id\"]}\n image.update(item[\"properties\"])\n ret[image[\"name\"]] = image\n\n return ret", "def __init__(__self__,\n resource_name: str,\n args: 
DatabaseReplicaArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def test_show_available(self):\n database.import_data('csvs', 'product_data.csv', 'customer_data.csv', 'rentals_data.csv')\n actual_available = database.show_available_products()\n expected_available = {'prd001': {'description': 'TV', 'product_type': 'livingroom',\n 'quantity_available': '3'},\n 'prd002': {'description': 'Couch', 'product_type': 'livingroom',\n 'quantity_available': '1'}}\n self.assertEqual(actual_available, expected_available)\n database.delete_database()\n\n database.import_data('csvs', 'produc_data.csv', 'customer_data.csv', 'rentals_data.csv')\n database.delete_database()", "def get_available_agendas(self):\n pass", "def getcars():\n cars = Car.query.filter(Car.isavailable == True)\n result = carsSchema.dump(cars)\n print(result)\n return jsonify(result)", "def get_available_resources(threshold, usage, total):\n return dict((host, int(threshold * total[host] - resource))\n for host, resource in usage.items())", "def acquire(self):\n self.logger.debug(\"in NerscAllocationInfo acquire\")\n return {\"Nersc_Allocation_Info\": pd.DataFrame(self.send_query())}", "def test_index_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def api_db():\n return pymongo.MongoClient(SCITRAN_PERSISTENT_DB_URI).get_database()", "def get_instance_ram_allocated(self, resource, period,\n aggregate, granularity=None):\n pass", "def count_orphan_resource_providers(db):\n sql = '''\\\n SELECT COUNT(*)\n FROM nova_api.resource_providers rp JOIN nova.compute_nodes cn\n ON cn.hypervisor_hostname = rp.name\n WHERE cn.deleted = 0\n AND rp.uuid != cn.uuid\n '''\n return db.query(sql)", "def _available_space( self, pool_name ):\n\t\ttry:\n\t\t\treturn self.storage_pools[pool_name].available\n\t\texcept KeyError:\n\t\t\treturn -1", "def test_read_cluster_resource_quota_status(self):\n pass", "def is_spatialized(resource):\n spatialized = False\n resource_id = resource['id']\n package_id=ckan_model.Resource.get(resource_id).resource_group.package_id\n package = ckan_model.Package.get(package_id)\n for resource in package.resources:\n if 'protocol' in resource.extras and 'parent_resource' in resource.extras:\n extras = resource.extras\n try:\n toolkit.get_action('resource_show')(None, { 'id':resource.id,'for-view':True })\n except (NotFound):\n continue\n\n if extras['parent_resource'] == resource_id\\\n and ( extras['protocol'].lower() == 'ogc:wms' or extras['ogc_type'].lower() == 'ogc:wfs'):\n print resource.state\n if resource.state !='active':\n return False\n spatialized = True\n break\n return spatialized", "def get_available_images():\n return AVAILABLE_IMAGES", "def describe_rds_db_instances(StackId=None, RdsDbInstanceArns=None):\n pass", "def list_availability_definition(self):\n return self._get(path='availability')", "def spark_list(provider):\n api.available(provider)", "def print_available( self ):\n\n\t\tmax_length = 0\n\n\t\tfor key in self._available:\n\t\t\tmax_length = max( max_length, len( key ) )\n\n\t\tformat_str = 'API found: %%-%ds (%%s)' % max_length\n\n\t\tfor key in self._available:\n\t\t\tentry = self._available.get( key )\n\t\t\tprint( format_str % ( key, entry.get( 'path' ) ) )", "def get_oracle(verbosity, resultset, providerversion):\n try:\n response = requests.get(ORACLEAPIURL)\n if verbosity:\n print(response.status_code)\n if response.status_code == 200:\n cidrdata = json.loads(response.content)\n providerversion[\"ORACLE\"] = cidrdata[\"last_updated_timestamp\"]\n 
for i in range(0, len(cidrdata[\"regions\"])):\n for j in range(0, len(cidrdata[\"regions\"][i][\"cidrs\"])):\n if cidrdata[\"regions\"][i][\"cidrs\"][j][\"cidr\"] not in resultset:\n resultset[cidrdata[\"regions\"][i][\"cidrs\"][j][\"cidr\"]] = \"Oracle\"\n\n except Exception as get_exception:\n print(\"Exception\")\n print(get_exception)\n\n return resultset, providerversion", "def test_read_cluster_resource_quota(self):\n pass", "def test_load_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def validate_availability_zones(self, context, resource_type,\n availability_zones):", "def available(self):\n return self[\"available\"]", "def describe_availability_zones_with_options(\n self,\n request: dds_20151201_models.DescribeAvailabilityZonesRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeAvailabilityZonesResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.accept_language):\n query['AcceptLanguage'] = request.accept_language\n if not UtilClient.is_unset(request.db_type):\n query['DbType'] = request.db_type\n if not UtilClient.is_unset(request.exclude_secondary_zone_id):\n query['ExcludeSecondaryZoneId'] = request.exclude_secondary_zone_id\n if not UtilClient.is_unset(request.exclude_zone_id):\n query['ExcludeZoneId'] = request.exclude_zone_id\n if not UtilClient.is_unset(request.instance_charge_type):\n query['InstanceChargeType'] = request.instance_charge_type\n if not UtilClient.is_unset(request.mongo_type):\n query['MongoType'] = request.mongo_type\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.region_id):\n query['RegionId'] = request.region_id\n if not UtilClient.is_unset(request.resource_group_id):\n query['ResourceGroupId'] = request.resource_group_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.storage_support):\n query['StorageSupport'] = request.storage_support\n if not UtilClient.is_unset(request.storage_type):\n query['StorageType'] = request.storage_type\n if not UtilClient.is_unset(request.zone_id):\n query['ZoneId'] = request.zone_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeAvailabilityZones',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeAvailabilityZonesResponse(),\n self.call_api(params, req, runtime)\n )", "def ComputeEAvailable(self):\r\n pass", "def get_available_regions(service_name):\n session = boto3.session.Session()\n return session.get_available_regions(service_name)", "def get_available_regions(service_name):\n session = boto3.session.Session()\n return session.get_available_regions(service_name)", "def is_available(self):\n raise NotImplementedError", "def __init__(__self__,\n resource_name: str,\n args: DatabaseArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def 
__init__(__self__,\n resource_name: str,\n args: RegionPerInstanceConfigArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def migrate_available_zone(\n self,\n request: dds_20151201_models.MigrateAvailableZoneRequest,\n ) -> dds_20151201_models.MigrateAvailableZoneResponse:\n runtime = util_models.RuntimeOptions()\n return self.migrate_available_zone_with_options(request, runtime)", "def Available(self) -> int:", "def Available(self) -> int:", "def Available(self) -> int:", "def init_mongo_db():\n try:\n app.mongo.cx.server_info()\n app.mongo.db = app.mongo.cx[\"kamistudio\"]\n if \"kami_corpora\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_corpora\")\n app.mongo.db.kami_corpora.create_index(\"id\", unique=True)\n\n if \"kami_models\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_models\")\n app.mongo.db.kami_models.create_index(\"id\", unique=True)\n\n if \"kami_definitions\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_definitions\")\n app.mongo.db.kami_definitions.create_index(\"id\", unique=True)\n\n if \"kami_new_definitions\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_new_definitions\")\n\n except ServerSelectionTimeoutError as e:\n app.mongo.db = None", "def GetApiCollection(resource_type):\n return 'compute.' + resource_type", "def getStudyRegions():\n comp_name = os.environ['COMPUTERNAME']\n conn = py.connect('Driver=ODBC Driver 11 for SQL Server;SERVER=' +\n comp_name + '\\HAZUSPLUSSRVR; UID=SA;PWD=Gohazusplus_02')\n exclusionRows = ['master', 'tempdb', 'model', 'msdb', 'syHazus', 'CDMS', 'flTmpDB']\n cursor = conn.cursor()\n cursor.execute('SELECT [StateID] FROM [syHazus].[dbo].[syState]') \n for state in cursor:\n exclusionRows.append(state[0])\n cursor = conn.cursor()\n cursor.execute('SELECT * FROM sys.databases')\n studyRegions = []\n for row in cursor:\n if row[0] not in exclusionRows:\n studyRegions.append(row[0])\n studyRegions.sort(key=lambda x: x.lower())\n return studyRegions", "def open_db_connection():\n client = MongoClient() #'104.131.185.191', 27017\n db = client[\"225VOH\"]\n return client, db", "def show(collection, filter = {}):\n # creates a connection with database\n result = []\n myclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\n db = myclient[\"techstart\"]\n col = db[collection]\n for x in col.find(filter):\n result.append(x)\n return result", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n backend_type: Optional[pulumi.Input['InstanceBackendType']] = None,\n connection_name: Optional[pulumi.Input[str]] = None,\n current_disk_size: Optional[pulumi.Input[str]] = None,\n database_version: Optional[pulumi.Input['InstanceDatabaseVersion']] = None,\n disk_encryption_configuration: Optional[pulumi.Input[pulumi.InputType['DiskEncryptionConfigurationArgs']]] = None,\n disk_encryption_status: Optional[pulumi.Input[pulumi.InputType['DiskEncryptionStatusArgs']]] = None,\n etag: Optional[pulumi.Input[str]] = None,\n failover_replica: Optional[pulumi.Input[pulumi.InputType['InstanceFailoverReplicaArgs']]] = None,\n gce_zone: Optional[pulumi.Input[str]] = None,\n instance_type: Optional[pulumi.Input['InstanceInstanceType']] = None,\n ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpMappingArgs']]]]] = None,\n ipv6_address: Optional[pulumi.Input[str]] = None,\n kind: Optional[pulumi.Input[str]] = None,\n maintenance_version: 
Optional[pulumi.Input[str]] = None,\n master_instance_name: Optional[pulumi.Input[str]] = None,\n max_disk_size: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n on_premises_configuration: Optional[pulumi.Input[pulumi.InputType['OnPremisesConfigurationArgs']]] = None,\n out_of_disk_report: Optional[pulumi.Input[pulumi.InputType['SqlOutOfDiskReportArgs']]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n replica_configuration: Optional[pulumi.Input[pulumi.InputType['ReplicaConfigurationArgs']]] = None,\n replica_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n root_password: Optional[pulumi.Input[str]] = None,\n satisfies_pzs: Optional[pulumi.Input[bool]] = None,\n scheduled_maintenance: Optional[pulumi.Input[pulumi.InputType['SqlScheduledMaintenanceArgs']]] = None,\n secondary_gce_zone: Optional[pulumi.Input[str]] = None,\n self_link: Optional[pulumi.Input[str]] = None,\n server_ca_cert: Optional[pulumi.Input[pulumi.InputType['SslCertArgs']]] = None,\n service_account_email_address: Optional[pulumi.Input[str]] = None,\n settings: Optional[pulumi.Input[pulumi.InputType['SettingsArgs']]] = None,\n state: Optional[pulumi.Input['InstanceState']] = None,\n suspension_reason: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceSuspensionReasonItem']]]] = None,\n __props__=None):\n ...", "def db_connect():\n client = pymongo.MongoClient(get_project_settings().get(\"MONGO_URI\"))\n db = client.vehicles\n\n return db" ]
[ "0.60528797", "0.5757612", "0.575394", "0.57174635", "0.5696347", "0.56500214", "0.5415369", "0.5368649", "0.53264123", "0.5321517", "0.5281705", "0.5280499", "0.52654624", "0.52461064", "0.5228316", "0.5189627", "0.512781", "0.5104357", "0.5083543", "0.5081698", "0.50655764", "0.49858958", "0.49348488", "0.49292126", "0.49187896", "0.49132764", "0.49063373", "0.48762184", "0.48728195", "0.48615557", "0.48489046", "0.4848287", "0.48400953", "0.48184758", "0.48131365", "0.4794416", "0.479214", "0.47865447", "0.4784897", "0.47799528", "0.47710806", "0.47586882", "0.47554076", "0.47501436", "0.47453886", "0.4743101", "0.47086695", "0.46795866", "0.4673606", "0.46689352", "0.4660352", "0.46562693", "0.46488604", "0.46428424", "0.46238327", "0.46195608", "0.46184093", "0.46152377", "0.46110696", "0.46049273", "0.46000922", "0.4597859", "0.45949218", "0.45927447", "0.45877847", "0.45582354", "0.45544615", "0.45386866", "0.45332938", "0.4532112", "0.4531858", "0.4531776", "0.45316157", "0.4528408", "0.45205277", "0.4509386", "0.45054537", "0.44975087", "0.44778657", "0.44683605", "0.44667342", "0.4464291", "0.44513994", "0.4443954", "0.44383284", "0.44244188", "0.44244188", "0.4407312", "0.44047502", "0.43948585", "0.43936852", "0.4392813", "0.4392813", "0.4392813", "0.43910906", "0.43882194", "0.43856895", "0.43706858", "0.43668258", "0.4360832", "0.43587157" ]
0.0
-1
> To query available regions and zones where ApsaraDB for MongoDB instances can be created, call the [DescribeAvailableResource](~~149719~~) operation.
async def describe_regions_with_options_async(
    self,
    request: dds_20151201_models.DescribeRegionsRequest,
    runtime: util_models.RuntimeOptions,
) -> dds_20151201_models.DescribeRegionsResponse:
    UtilClient.validate_model(request)
    query = {}
    if not UtilClient.is_unset(request.accept_language):
        query['AcceptLanguage'] = request.accept_language
    if not UtilClient.is_unset(request.owner_account):
        query['OwnerAccount'] = request.owner_account
    if not UtilClient.is_unset(request.owner_id):
        query['OwnerId'] = request.owner_id
    if not UtilClient.is_unset(request.region_id):
        query['RegionId'] = request.region_id
    if not UtilClient.is_unset(request.resource_owner_account):
        query['ResourceOwnerAccount'] = request.resource_owner_account
    if not UtilClient.is_unset(request.resource_owner_id):
        query['ResourceOwnerId'] = request.resource_owner_id
    if not UtilClient.is_unset(request.security_token):
        query['SecurityToken'] = request.security_token
    req = open_api_models.OpenApiRequest(
        query=OpenApiUtilClient.query(query)
    )
    params = open_api_models.Params(
        action='DescribeRegions',
        version='2015-12-01',
        protocol='HTTPS',
        pathname='/',
        method='POST',
        auth_type='AK',
        style='RPC',
        req_body_type='formData',
        body_type='json'
    )
    return TeaCore.from_map(
        dds_20151201_models.DescribeRegionsResponse(),
        await self.call_api_async(params, req, runtime)
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_available_resource(self, nodename):\n LOG.debug(\"get_available_resource\")\n\n dictval = self._host.properties\n\n return dictval", "def get_available_resource(self, nodename):\n if nodename not in self._drv_nodes:\n return {}\n supported_tuple = ('IA64', 'kvm', 'hvm')\n return {\n 'vcpus': drv_conf.max_vcpus,\n 'memory_mb': drv_conf.max_memory_mb,\n 'local_gb': drv_conf.max_disk_gb,\n 'vcpus_used': 0,\n 'memory_mb_used': 0,\n 'local_gb_used': 0,\n 'hypervisor_type': self.name,\n 'hypervisor_version': '1',\n 'hypervisor_hostname': nodename,\n 'disk_available_least': 0,\n 'cpu_info': '?',\n 'numa_topology': None,\n 'supported_instances': [supported_tuple]\n }", "def show_available_products():\n\n mongo = MongoDBConnection()\n result = {}\n\n with mongo:\n db = mongo.connection.HPNorton\n productcollection = db[\"products\"]\n for document in productcollection.find({\"quantity_available\": {\"$gt\": \"0\"}}):\n key = document['product_id']\n\n result[key] = {\n 'description': document['description'],\n 'product_type': document['product_type'],\n 'quantity_available': document['quantity_available']\n }\n\n return result", "def show_available_products():\n available_product = {}\n\n if not collection_exist(DATABASE, PRODUCT_COLLECTION):\n return available_product\n\n with MongoDBConnection() as mongo:\n database = mongo.connection[DATABASE]\n\n available_product__count = 0\n for product in database[PRODUCT_COLLECTION].find({\"quantity_available\": {\"$ne\": '0'}}):\n available_product[product['product_id']] = \\\n {'description': product['description'],\n 'product_type': product['product_type'],\n 'quantity_available': product['quantity_available']}\n available_product__count += 1\n\n return available_product__count", "def get_available_resource(self, nodename):\n curent_time = time.time()\n if curent_time - self.cleanup_time > CONF.azure.cleanup_span:\n self.cleanup_time = curent_time\n self._cleanup_deleted_os_disks()\n self._cleanup_deleted_nics()\n usage_family = 'basicAFamily'\n try:\n page = self.compute.usage.list(CONF.azure.location)\n except Exception as e:\n msg = six.text_type(e)\n LOG.exception(msg)\n ex = exception.ComputeUsageListFailure(reason=six.text_type(e))\n raise ex\n usages = [i for i in page]\n cores = 0\n cores_used = 0\n for i in usages:\n if hasattr(i, 'name') and hasattr(i.name, 'value'):\n if usage_family == i.name.value:\n cores = i.limit if hasattr(i, 'limit') else 0\n cores_used = i.current_value \\\n if hasattr(i, 'current_value') else 0\n break\n return {'vcpus': cores,\n 'memory_mb': 100000000,\n 'local_gb': 100000000,\n 'vcpus_used': cores_used,\n 'memory_mb_used': 0,\n 'local_gb_used': 0,\n 'hypervisor_type': hv_type.HYPERV,\n 'hypervisor_version': 300,\n 'hypervisor_hostname': nodename,\n 'cpu_info': '{\"model\": [\"Intel(R) Xeon(R) CPU E5-2670 0 @ '\n '2.60GHz\"], \"topology\": {\"cores\": 16, \"threads\": '\n '32}}',\n 'supported_instances': [(arch.I686, hv_type.HYPERV,\n vm_mode.HVM),\n (arch.X86_64, hv_type.HYPERV,\n vm_mode.HVM)],\n 'numa_topology': None\n }", "def show_available_products():\n LOGGER.debug('Listing all available products.')\n available_products = {}\n with MongoDBConnection() as mongo:\n database = mongo.connection.hp_norton\n for product in database.products.find(\n {'quantity_available': {'$gt': 0}}):\n available_products[product['product_id']] = {\n 'description': product['description'],\n 'product_type': product['product_type'],\n 'quantity_available': product['quantity_available']}\n return available_products", "def 
get_available_dbms(connection, error_msg=None):\n url = f\"{connection.base_url}/api/dbobjects/dbmss\"\n response = connection.session.get(url=url)\n if not response.ok:\n if error_msg is None:\n error_msg = \"Error getting available DBMSs\"\n response_handler(response, error_msg)\n return response", "def check_available():\n\n rm = current_app.config['rm_object']\n\n return rm.check_availability()", "def get_available_databases():\n\n available_databases = dict()\n all_databases = resource_keys('database', strip=[])\n for database in all_databases:\n try:\n database_entry_point = load_resource(database, 'database')\n\n available_databases[database] = dict()\n\n # Checking if the database has data for the ZT normalization\n available_databases[database][\"has_zt\"] = hasattr(database_entry_point, \"zobjects\") and hasattr(database_entry_point, \"tobjects\")\n available_databases[database][\"groups\"] = []\n # Searching for database groups\n try:\n groups = list(database_entry_point.groups()) or [\"dev\"]\n for g in [\"dev\", \"eval\"]:\n available_databases[database][\"groups\"] += [g] if g in groups else []\n except Exception:\n # In case the method groups is not implemented\n available_databases[database][\"groups\"] = [\"dev\"]\n except Exception:\n pass\n return available_databases", "def show_available_products(): # {{{\n products_available = {}\n try:\n with MONGO:\n product_collection = MONGO.connection.assignment_07[\"product\"].find(\n )\n\n for product in product_collection:\n if int(product[\"quantity_available\"]) > 0:\n products_available[product[\"product_id\"]] = {\n \"description\": product[\"description\"],\n \"product_type\": product[\"product_type\"],\n \"quantity_available\": product[\"quantity_available\"],\n }\n except TypeError as excep:\n LOGGER.warning(\"Error looking up available products\")\n LOGGER.warning(excep)\n else:\n if not products_available:\n LOGGER.info('No products found')\n else:\n LOGGER.info(\"Available products retrieved successfully.\")\n return products_available # }}}", "def show_available_products(*args):\n logger.info(f\"Preparing dict of available prodcuts...\")\n available_products = {}\n\n with MONGO:\n mdb = eval(Settings.connect_string)\n products = mdb[\"product\"]\n for doc in products.find():\n del doc[\"_id\"]\n if int(doc[\"quantity_available\"]) > 0:\n product_id = doc[\"product_id\"]\n del doc[\"product_id\"]\n available_products[product_id] = doc\n\n return available_products", "def compute_zones(self):\n path = '/os-availability-zone/detail'\n res = self.compute.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('Get openstack availability zone: %s' % truncate(res))\n return res[0]['availabilityZoneInfo']", "def _get_info_about_available_resources(self, min_ram, min_hdd, min_vcpus):\n vms_count = 0\n for hypervisor in self.nova_cli.hypervisors.list():\n if hypervisor.free_ram_mb >= min_ram:\n if hypervisor.free_disk_gb >= min_hdd:\n if hypervisor.vcpus - hypervisor.vcpus_used >= min_vcpus:\n # We need to determine how many VMs we can run\n # on this hypervisor\n free_cpu = hypervisor.vcpus - hypervisor.vcpus_used\n k1 = int(hypervisor.free_ram_mb / min_ram)\n k2 = int(hypervisor.free_disk_gb / min_hdd)\n k3 = int(free_cpu / min_vcpus)\n vms_count += min(k1, k2, k3)\n return vms_count", "def reserved_compare(options):\n running_instances = defaultdict(dict)\n reserved_purchases = defaultdict(dict)\n regions = boto.ec2.regions()\n good_regions = [r for r in regions if r.name not in 
['us-gov-west-1',\n 'cn-north-1']]\n for region in good_regions:\n if options.trace:\n print \" Scanning region {0}\".format(region.name)\n conn = region.connect()\n filters = {'instance-state-name': 'running'}\n zones = defaultdict(dict)\n\n if options.trace:\n print \" Fetching running instances\"\n reservations = conn.get_all_instances(filters=filters)\n for reservation in reservations:\n for inst in reservation.instances:\n if options.debug:\n print instance_string(inst, options, verbose=True)\n if inst.state != 'running':\n if options.debug:\n print \"Skip {0.id} state {0.state}\".format(inst)\n continue\n if inst.spot_instance_request_id:\n if options.debug:\n print \"Skip {0.id} has spot id {0.spot_instance_request_id}\".format(inst)\n continue\n if 'aws:autoscaling:groupName' in inst.tags:\n if options.debug:\n print \"Skip {0.id} is an autoscale instance\".format(inst)\n continue\n if inst.platform == 'Windows' or inst.platform == 'windows':\n if options.debug:\n print \"Skip {0.id} has platform {0.platform}\".format(inst)\n continue\n if inst.instance_type not in zones[inst.placement]:\n zones[inst.placement][inst.instance_type] = []\n zones[inst.placement][inst.instance_type].append(inst)\n\n if zones:\n running_instances[region.name] = zones\n\n purchased = defaultdict(dict)\n if options.trace:\n print \" Fetching reservations\"\n\n reserved = conn.get_all_reserved_instances()\n for r in reserved:\n if options.debug:\n print reservation_string(r, verbose=True)\n if r.state != 'active':\n continue\n if r.instance_tenancy != 'default':\n print 'WARNING: Non-default tenancy %s: %s' % (r.instance_tenancy, reservation_string(r))\n continue\n if r.instance_type not in purchased[r.availability_zone]:\n purchased[r.availability_zone][r.instance_type] = [r]\n else:\n purchased[r.availability_zone][r.instance_type].append(r)\n\n if purchased:\n reserved_purchases[region.name] = purchased\n\n return check_reservation_use(options, running_instances,\n reserved_purchases)", "def describe_availability_options(DomainName=None, Deployed=None):\n pass", "def show_available_products():\n products = DATABASE['product'].find({'quantity_available': {'$ne':'0'}})\n products_dict = {prod['product_id']:\n {'description': prod['description'],\n 'product_type': prod['product_type'],\n 'quantity_available': int(prod['quantity_available'])}\n for prod in products}\n return products_dict", "def get_available_db_drivers(connection, error_msg=None):\n url = f\"{connection.base_url}/api/dbobjects/drivers\"\n response = connection.session.get(url=url)\n if not response.ok:\n if error_msg is None:\n error_msg = \"Error getting available database drivers\"\n response_handler(response, error_msg)\n return response", "def try_atlas():\n result = {}\n try:\n vcap_services = os.getenv('VCAP_SERVICES')\n services = json.loads(vcap_services)\n for service_name in services.keys():\n print(f'service_name={service_name}')\n if service_name == \"_\":\n continue\n credentials = load_from_vcap_services(service_name)\n result.update(credentials)\n except Exception as err:\n print( f'Error looking for VCAP_SERVICES {err}')\n result['error']=err\n return result\n\n mongo_results = {}\n try:\n db = MongoClient( result['connectionString'] )\n mongo_results[\"MongoClient\"]= f'{db}'\n mongo_results[\"server_info\"]=db.server_info()\n except Exception as err:\n print( f'Error trying connection to Atlas: {err}')\n result['atlas-error']=err\n finally:\n result['mongo']=mongo_results\n\n return result", "def is_available(**kwargs: 
Any) -> bool:\n try:\n _check_available()\n except Unavailable as e:\n logger.info('Database not available: %s', e)\n return False\n return True", "def avail_locations(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"The avail_images function must be called with \"\n \"-f or --function, or with the --list-locations option\"\n )\n\n ret = {}\n conn = get_conn()\n\n for item in conn.list_locations()[\"items\"]:\n reg, loc = item[\"id\"].split(\"/\")\n location = {\"id\": item[\"id\"]}\n\n if reg not in ret:\n ret[reg] = {}\n\n ret[reg][loc] = location\n return ret", "def load_landkreis_information():\n client = MongoClient(f'mongodb://{os.getenv(\"USR_\")}:{os.getenv(\"PWD_\")}@{os.getenv(\"REMOTE_HOST\")}:{os.getenv(\"REMOTE_PORT\")}/{os.getenv(\"AUTH_DB\")}')\n db = client[os.getenv(\"MAIN_DB\")]\n lk_collection = db[\"lk_overview\"]\n data = pd.DataFrame(list(lk_collection.find()))\n return data", "def _check_available() -> None:\n current_session().query(\"1\").from_statement(text(\"SELECT 1\")).all()", "def avail_locations(session=None, call=None):\n # TODO: need to figure out a good meaning of locations in Xen\n if call == \"action\":\n raise SaltCloudException(\n \"The avail_locations function must be called with -f or --function.\"\n )\n return pool_list()", "def get_resource_available_in_dt_range(candidate_resources, dt_range,\n new_resource_occupations):\n for resource in candidate_resources:\n\n # Only occupations of current resource\n res_new_occupations = [y[1] for y in filter(\n lambda x: x[0] == clean_resource(resource),\n new_resource_occupations)]\n\n # Check availability\n availability = resource.get('availability')\n if (availability and not is_datetime_range_available(dt_range,\n availability)):\n continue\n\n # Check occupations\n occupations = resource.get('occupations', []) + res_new_occupations\n overlappings = [overlaps(dt_range, o) for o in occupations]\n if any(overlappings):\n continue\n\n return resource\n\n return None", "def test_aws_service_api_availability_zones_get(self):\n pass", "def oci_mysql_dbsystem_high_availability_check(cache, awsAccountId, awsRegion, awsPartition, ociTenancyId, ociUserId, ociRegionName, ociCompartments, ociUserApiKeyFingerprint):\n # ISO Time\n iso8601Time = datetime.datetime.now(datetime.timezone.utc).isoformat()\n for mysqldbs in get_mysql_db_systems(cache, ociTenancyId, ociUserId, ociRegionName, ociCompartments, ociUserApiKeyFingerprint):\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(mysqldbs,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n compartmentId = mysqldbs[\"compartment_id\"]\n mysqldbsId = mysqldbs[\"id\"]\n mysqldbsName = mysqldbs[\"display_name\"]\n lbLifecycleState = mysqldbs[\"lifecycle_state\"]\n createdAt = str(mysqldbs[\"time_created\"])\n\n if mysqldbs[\"is_highly_available\"] is False:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{mysqldbsId}/oci-mysql-dbs-high-availability-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{mysqldbsId}/oci-mysql-dbs-high-availability-check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"LOW\"},\n \"Confidence\": 99,\n \"Title\": 
\"[OCI.MySQLDatabaseService.7] MySQL Database Systems should be configured to be highly available\",\n \"Description\": f\"Oracle MySQL Database System {mysqldbsName} in Compartment {compartmentId} in {ociRegionName} is not highly available. A high availability DB system is made up of three MySQL instances: a primary instance and two secondary instances. Each MySQL instance utilizes the same amount of block volume storage, number of OCPUs, and amount of RAM defined in the shape chosen. The primary instance functions as a read/write endpoint and you have read/write access to the primary instance only. All data that you write to the primary instance is copied to the secondary instances asynchronously. The secondary instances are placed in different availability or fault domains. High availablility DB systems consume more resources (OCPUs, RAM, network bandwidth) than standalone DB systems. Hence the throughput and latency differ from the standalone DB systems. High availability uses MySQL Group Replication to replicate data from the primary instance to the secondary instances. The replication occurs over a secure, managed, internal network, unconnected to the VCN subnet you configured for the DB system. Limited information about this internal network is available in some Performance Schema tables, and you can neither connect to it nor view any other information related to it. Refer to the remediation instructions if this configuration is not intended.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on High Availability refer to the Overview of High Availability section of the Oracle Cloud Infrastructure Documentation for MySQL Database.\",\n \"Url\": \"https://docs.oracle.com/en-us/iaas/mysql-database/doc/overview-high-availability.html#GUID-0387FC6B-73DF-4447-A206-3CBA2EB0FFB3\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"OCI\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": ociTenancyId,\n \"AssetRegion\": ociRegionName,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Database\",\n \"AssetService\": \"Oracle MySQL Database Service\",\n \"AssetComponent\": \"Database System\"\n },\n \"Resources\": [\n {\n \"Type\": \"OciMySqlDatabaseServiceDatabaseSystem\",\n \"Id\": mysqldbsId,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"TenancyId\": ociTenancyId,\n \"CompartmentId\": compartmentId,\n \"Region\": ociRegionName,\n \"Name\": mysqldbsName,\n \"Id\": mysqldbsId,\n \"CreatedAt\": createdAt,\n \"LifecycleState\": lbLifecycleState\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 ID.BE-5\",\n \"NIST CSF V1.1 PR.DS-4\",\n \"NIST CSF V1.1 PR.PT-5\",\n \"NIST SP 800-53 Rev. 4 AU-4\",\n \"NIST SP 800-53 Rev. 4 CP-2\",\n \"NIST SP 800-53 Rev. 4 CP-7\",\n \"NIST SP 800-53 Rev. 4 CP-8\",\n \"NIST SP 800-53 Rev. 4 CP-11\",\n \"NIST SP 800-53 Rev. 4 CP-13\",\n \"NIST SP 800-53 Rev. 4 PL-8\",\n \"NIST SP 800-53 Rev. 4 SA-14\",\n \"NIST SP 800-53 Rev. 4 SC-5\",\n \"NIST SP 800-53 Rev. 
4 SC-6\",\n \"AICPA TSC CC3.1\",\n \"AICPA TSC A1.1\",\n \"AICPA TSC A1.2\",\n \"ISO 27001:2013 A.11.1.4\",\n \"ISO 27001:2013 A.12.3.1\",\n \"ISO 27001:2013 A.17.1.1\",\n \"ISO 27001:2013 A.17.1.2\",\n \"ISO 27001:2013 A.17.2.1\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{mysqldbsId}/oci-mysql-dbs-high-availability-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{mysqldbsId}/oci-mysql-dbs-high-availability-check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[OCI.MySQLDatabaseService.7] MySQL Database Systems should be configured to be highly available\",\n \"Description\": f\"Oracle MySQL Database System {mysqldbsName} in Compartment {compartmentId} in {ociRegionName} is highly available.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on Deletion Planning for Database Systems refer to the Advanced Option: Deletion Plan section of the Oracle Cloud Infrastructure Documentation for MySQL Database.\",\n \"Url\": \"https://docs.oracle.com/en-us/iaas/mysql-database/doc/advanced-options.html#MYAAS-GUID-29A995D2-1D40-4AE8-A654-FB6F40B07D85\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"OCI\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": ociTenancyId,\n \"AssetRegion\": ociRegionName,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Database\",\n \"AssetService\": \"Oracle MySQL Database Service\",\n \"AssetComponent\": \"Database System\"\n },\n \"Resources\": [\n {\n \"Type\": \"OciMySqlDatabaseServiceDatabaseSystem\",\n \"Id\": mysqldbsId,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"TenancyId\": ociTenancyId,\n \"CompartmentId\": compartmentId,\n \"Region\": ociRegionName,\n \"Name\": mysqldbsName,\n \"Id\": mysqldbsId,\n \"CreatedAt\": createdAt,\n \"LifecycleState\": lbLifecycleState\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 ID.BE-5\",\n \"NIST CSF V1.1 PR.DS-4\",\n \"NIST CSF V1.1 PR.PT-5\",\n \"NIST SP 800-53 Rev. 4 AU-4\",\n \"NIST SP 800-53 Rev. 4 CP-2\",\n \"NIST SP 800-53 Rev. 4 CP-7\",\n \"NIST SP 800-53 Rev. 4 CP-8\",\n \"NIST SP 800-53 Rev. 4 CP-11\",\n \"NIST SP 800-53 Rev. 4 CP-13\",\n \"NIST SP 800-53 Rev. 4 PL-8\",\n \"NIST SP 800-53 Rev. 4 SA-14\",\n \"NIST SP 800-53 Rev. 4 SC-5\",\n \"NIST SP 800-53 Rev. 
4 SC-6\",\n \"AICPA TSC CC3.1\",\n \"AICPA TSC A1.1\",\n \"AICPA TSC A1.2\",\n \"ISO 27001:2013 A.11.1.4\",\n \"ISO 27001:2013 A.12.3.1\",\n \"ISO 27001:2013 A.17.1.1\",\n \"ISO 27001:2013 A.17.1.2\",\n \"ISO 27001:2013 A.17.2.1\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding", "def test_get_cloud_resources(self):\n pass", "def update_available_resource(self, ctxt, host):\n LOG.debug(\"update_available_resource\")\n return", "def show_dbs(*dbs):\n if dbs:\n log.debug(\"get dbs from pillar: %s\", dbs)\n result = {}\n for db in dbs:\n result[db] = __salt__[\"pillar.get\"](\"oracle:dbs:\" + db)\n return result\n else:\n pillar_dbs = __salt__[\"pillar.get\"](\"oracle:dbs\")\n log.debug(\"get all (%s) dbs from pillar\", len(pillar_dbs))\n return pillar_dbs", "def is_available():", "def _update_available_resources(self, context):\n\n all_nodes = self.driver.get_available_nodes()\n all_rps = self.scheduler_client.reportclient\\\n .get_filtered_resource_providers({})\n node_uuids = [node.uuid for node in all_nodes]\n\n # Clean orphan resource providers in placement\n for rp in all_rps:\n if rp['uuid'] not in node_uuids:\n server_by_node = objects.Server.list(\n context, filters={'node_uuid': rp['uuid']})\n if server_by_node:\n continue\n self.scheduler_client.reportclient.delete_resource_provider(\n rp['uuid'])\n\n for node in all_nodes:\n if self.driver.is_node_consumable(node):\n self.scheduler_client.reportclient \\\n .delete_allocations_for_resource_provider(node.uuid)\n resource_class = sched_utils.ensure_resource_class_name(\n node.resource_class)\n inventory = self.driver.get_node_inventory(node)\n inventory_data = {resource_class: inventory}\n self.scheduler_client.set_inventory_for_provider(\n node.uuid, node.name or node.uuid, inventory_data,\n resource_class)", "def db_lookup(client):\n dblist_dict= client.get_list_database()\n # print(\"def db_lookup 010:\", dblist_dict)\n # print(\"def db_lookup 020:\", dblist_dict[3]['name'])\n # for element in dblist_dict:\n # print(\"db_lookup 3:\", element['name'])\n return dblist_dict", "def test_get_virtualization_realm_resources(self):\n pass", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n availability_zone: Optional[pulumi.Input[str]] = None,\n connection_string: Optional[pulumi.Input[str]] = None,\n create_sample_data: Optional[pulumi.Input[bool]] = None,\n db_instance_category: Optional[pulumi.Input[str]] = None,\n db_instance_class: Optional[pulumi.Input[str]] = None,\n db_instance_mode: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n encryption_key: Optional[pulumi.Input[str]] = None,\n encryption_type: Optional[pulumi.Input[str]] = None,\n engine: Optional[pulumi.Input[str]] = None,\n engine_version: Optional[pulumi.Input[str]] = None,\n instance_charge_type: Optional[pulumi.Input[str]] = None,\n instance_group_count: Optional[pulumi.Input[int]] = None,\n instance_network_type: Optional[pulumi.Input[str]] = None,\n instance_spec: Optional[pulumi.Input[str]] = None,\n ip_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InstanceIpWhitelistArgs']]]]] = None,\n maintain_end_time: Optional[pulumi.Input[str]] = None,\n maintain_start_time: Optional[pulumi.Input[str]] = None,\n master_node_num: Optional[pulumi.Input[int]] = None,\n payment_type: Optional[pulumi.Input[str]] = None,\n period: Optional[pulumi.Input[str]] = None,\n port: Optional[pulumi.Input[str]] = 
None,\n private_ip_address: Optional[pulumi.Input[str]] = None,\n resource_group_id: Optional[pulumi.Input[str]] = None,\n security_ip_lists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n seg_node_num: Optional[pulumi.Input[int]] = None,\n seg_storage_type: Optional[pulumi.Input[str]] = None,\n ssl_enabled: Optional[pulumi.Input[int]] = None,\n status: Optional[pulumi.Input[str]] = None,\n storage_size: Optional[pulumi.Input[int]] = None,\n tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n used_time: Optional[pulumi.Input[str]] = None,\n vector_configuration_status: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n vswitch_id: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None) -> 'Instance':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InstanceState.__new__(_InstanceState)\n\n __props__.__dict__[\"availability_zone\"] = availability_zone\n __props__.__dict__[\"connection_string\"] = connection_string\n __props__.__dict__[\"create_sample_data\"] = create_sample_data\n __props__.__dict__[\"db_instance_category\"] = db_instance_category\n __props__.__dict__[\"db_instance_class\"] = db_instance_class\n __props__.__dict__[\"db_instance_mode\"] = db_instance_mode\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"encryption_key\"] = encryption_key\n __props__.__dict__[\"encryption_type\"] = encryption_type\n __props__.__dict__[\"engine\"] = engine\n __props__.__dict__[\"engine_version\"] = engine_version\n __props__.__dict__[\"instance_charge_type\"] = instance_charge_type\n __props__.__dict__[\"instance_group_count\"] = instance_group_count\n __props__.__dict__[\"instance_network_type\"] = instance_network_type\n __props__.__dict__[\"instance_spec\"] = instance_spec\n __props__.__dict__[\"ip_whitelists\"] = ip_whitelists\n __props__.__dict__[\"maintain_end_time\"] = maintain_end_time\n __props__.__dict__[\"maintain_start_time\"] = maintain_start_time\n __props__.__dict__[\"master_node_num\"] = master_node_num\n __props__.__dict__[\"payment_type\"] = payment_type\n __props__.__dict__[\"period\"] = period\n __props__.__dict__[\"port\"] = port\n __props__.__dict__[\"private_ip_address\"] = private_ip_address\n __props__.__dict__[\"resource_group_id\"] = resource_group_id\n __props__.__dict__[\"security_ip_lists\"] = security_ip_lists\n __props__.__dict__[\"seg_node_num\"] = seg_node_num\n __props__.__dict__[\"seg_storage_type\"] = seg_storage_type\n __props__.__dict__[\"ssl_enabled\"] = ssl_enabled\n __props__.__dict__[\"status\"] = status\n __props__.__dict__[\"storage_size\"] = storage_size\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"used_time\"] = used_time\n __props__.__dict__[\"vector_configuration_status\"] = vector_configuration_status\n __props__.__dict__[\"vpc_id\"] = vpc_id\n __props__.__dict__[\"vswitch_id\"] = vswitch_id\n __props__.__dict__[\"zone_id\"] = zone_id\n return Instance(resource_name, opts=opts, __props__=__props__)", "def _get_available_regions():\n session = boto3.session.Session()\n\n return session.get_available_regions(service_name='s3')", "def ex_list_availability_zones(self, only_available=True):\n params = {'Action': 'DescribeAvailabilityZones'}\n\n if only_available:\n params.update({'Filter.0.Name': 'state'})\n params.update({'Filter.0.Value.0': 'available'})\n\n params.update({'Filter.1.Name': 'region-name'})\n params.update({'Filter.1.Value.0': self.region_name})\n\n result = 
self.connection.request(self.path,\n params=params.copy()).object\n\n availability_zones = []\n for element in self._findall(result, 'availabilityZoneInfo/item'):\n name = self._findtext(element, 'zoneName')\n zone_state = self._findtext(element, 'zoneState')\n region_name = self._findtext(element, 'regionName')\n\n availability_zone = ExEC2AvailabilityZone(\n name=name,\n zone_state=zone_state,\n region_name=region_name\n )\n availability_zones.append(availability_zone)\n\n return availability_zones", "def show_resource_pool(client, private_cloud, resource_pool, location):\n return client.get(location, private_cloud, resource_pool)", "def _get_available_region_options():\n available_regions = sorted(_get_available_regions())\n options = [ConfigurationOption(region, region) for region in available_regions]\n\n return options", "def get_availability_zones(self, context, filters=None, fields=None,\n sorts=None, limit=None, marker=None,\n page_reverse=False):", "def update_available_resource(self, ctxt, host):\n return", "def update_available_resource(self, context):\n # ask hypervisor for its view of resource availability &\n # usage:\n resources = self.driver.get_available_resource()\n if not resources:\n # The virt driver does not support this function\n LOG.warn(_(\"Virt driver does not support \"\n \"'get_available_resource' Compute tracking is disabled.\"))\n self.compute_node = None\n self.claims = {}\n return\n\n # Confirm resources dictionary contains expected keys:\n self._verify_resources(resources)\n\n resources['free_ram_mb'] = resources['memory_mb'] - \\\n resources['memory_mb_used']\n resources['free_disk_gb'] = resources['local_gb'] - \\\n resources['local_gb_used']\n\n LOG.audit(_(\"free_ram_mb: %s\") % resources['free_ram_mb'])\n LOG.audit(_(\"free_disk_gb: %s\") % resources['free_disk_gb'])\n # Apply resource claims representing in-progress operations to\n # 'resources'. This may over-estimate the amount of resources in use,\n # at least until the next time 'update_available_resource' runs.\n self._apply_claims(resources)\n\n # also generate all load stats:\n values = self._create_load_stats(context)\n resources.update(values)\n\n if not self.compute_node:\n # we need a copy of the ComputeNode record:\n service = self._get_service(context)\n if not service:\n # no service record, disable resource\n return\n\n compute_node_ref = service['compute_node']\n if compute_node_ref:\n self.compute_node = compute_node_ref[0]\n\n if not self.compute_node:\n # Need to create the ComputeNode record:\n resources['service_id'] = service['id']\n self.compute_node = self._create(context, resources)\n LOG.info(_('Compute_service record created for %s ') % self.host)\n\n else:\n # just update the record:\n self.compute_node = self._update(context, resources,\n prune_stats=True)\n LOG.info(_('Compute_service record updated for %s ') % self.host)", "def database_connectivity():\n\n port = \"mongodb://localhost:27017/\"\n client = pymongo.MongoClient(host=port)\n\n db = client[\"Password_Manager\"]\n collection = db[\"Passwords\"]\n\n return collection", "def get_available_databases():\n return map(\n lambda (key, value): (key, value[\"description\"]),\n DumpConverter.DATABASES.items())", "def databases(request): # pylint: disable=unused-argument\n response = InstancesAccess.get_all()\n # By default, JsonResponse refuse to serialize a list to a Json list. 
safe=False allow it.\n return JsonResponse(response, safe=False)", "def list_allocation_candidates(req):\n context = req.environ['placement.context']\n context.can(policies.LIST)\n want_version = req.environ[microversion.MICROVERSION_ENVIRON]\n get_schema = _get_schema(want_version)\n util.validate_query_params(req, get_schema)\n\n rqparams = lib.RequestWideParams.from_request(req)\n groups = lib.RequestGroup.dict_from_request(req, rqparams)\n\n if not rqparams.group_policy:\n # group_policy is required if more than one numbered request group was\n # specified.\n if len([rg for rg in groups.values() if rg.use_same_provider]) > 1:\n raise webob.exc.HTTPBadRequest(\n 'The \"group_policy\" parameter is required when specifying '\n 'more than one \"resources{N}\" parameter.')\n\n # We can't be aware of nested architecture with old microversions\n nested_aware = want_version.matches((1, 29))\n\n try:\n cands = ac_obj.AllocationCandidates.get_by_requests(\n context, groups, rqparams, nested_aware=nested_aware)\n except exception.ResourceClassNotFound as exc:\n raise webob.exc.HTTPBadRequest(\n 'Invalid resource class in resources parameter: %(error)s' %\n {'error': exc})\n except exception.TraitNotFound as exc:\n raise webob.exc.HTTPBadRequest(str(exc))\n\n response = req.response\n trx_cands = _transform_allocation_candidates(cands, groups, want_version)\n json_data = jsonutils.dumps(trx_cands)\n response.body = encodeutils.to_utf8(json_data)\n response.content_type = 'application/json'\n if want_version.matches((1, 15)):\n response.cache_control = 'no-cache'\n response.last_modified = timeutils.utcnow(with_timezone=True)\n return response", "def ip_allocations(self) -> Optional[Sequence['outputs.SubResourceResponse']]:\n return pulumi.get(self, \"ip_allocations\")", "def get_instances() -> dict:\n url = f\"{app.config['COMPUTE_SERVERS_REF']}/detail\"\n instances_rq = request(\n method=\"GET\", url=url, headers=build_header(), params={\"vm_state\": \"active\"},\n )\n\n if not instances_rq.ok:\n HTTPError(instances_rq.status_code)\n\n answer = {\"servers\": list()}\n for instance in instances_rq.json()[\"servers\"]:\n instance_info = dict(name=instance[\"name\"])\n instance_info[\"ip_addresses\"] = list()\n for network, info in instance[\"addresses\"].items():\n instance_info[\"ip_addresses\"].extend(entry[\"addr\"] for entry in info)\n answer[\"servers\"].append(instance_info)\n\n return answer", "def get_all_db_region(self, context):\n zone_objs = self.dns_manager.get_all_db_region(context)\n return zone_objs", "def check_available(rs):\n info = rs.get_cluster_info()\n if info['ClusterStatus'] == 'available':\n return True\n elif info['ClusterStatus'] == 'deleting':\n raise AttributeError(f'Cluster is currently deleting. Please wait and try again.\\n{info}')\n elif info['ClusterStatus'] == 'creating': \n return False\n \n raise NameError(f'Could not get cluster status information. Current information about the cluster: \\n{info}')", "def update_available_resource(self, context):\n new_resource_tracker_dict = {}\n\n compute_nodes_in_db = self._get_compute_nodes_in_db(context,\n use_slave=True)\n nodenames = set(self.driver.get_available_nodes())\n for nodename in nodenames:\n rt = self._get_resource_tracker(nodename)\n try:\n rt.update_available_resource(context)\n except exception.ComputeHostNotFound:\n # NOTE(comstud): We can get to this case if a node was\n # marked 'deleted' in the DB and then re-added with a\n # different auto-increment id. 
The cached resource\n # tracker tried to update a deleted record and failed.\n # Don't add this resource tracker to the new dict, so\n # that this will resolve itself on the next run.\n LOG.info(_LI(\"Compute node '%s' not found in \"\n \"update_available_resource.\"), nodename)\n continue\n except Exception:\n LOG.exception(_LE(\"Error updating resources for node \"\n \"%(node)s.\"), {'node': nodename})\n new_resource_tracker_dict[nodename] = rt\n\n # NOTE(comstud): Replace the RT cache before looping through\n # compute nodes to delete below, as we can end up doing greenthread\n # switches there. Best to have everyone using the newest cache\n # ASAP.\n self._resource_tracker_dict = new_resource_tracker_dict\n\n # Delete orphan compute node not reported by driver but still in db\n for cn in compute_nodes_in_db:\n if cn.hypervisor_hostname not in nodenames:\n LOG.info(_LI(\"Deleting orphan compute node %s\"), cn.id)\n cn.destroy()", "def avail(self, time, resource_group):\n a = set()\n for r in self.resource_group.resources:\n pass", "def list_rds(region, filter_by_kwargs):\n conn = boto.rds.connect_to_region(region)\n instances = conn.get_all_dbinstances()\n return lookup(instances, filter_by=filter_by_kwargs)", "def GetAvailabilityOfConnection(ConnectionInfo, StartDate, EndDate):\r\n\tVerkehrstageHex = ConnectionInfo[ConnInfoInd['trafficdays_hexcode']]\r\n\treturn GetAvailabilityBetweenDates(StartDate, EndDate, VerkehrstageHex)", "def list_resource_pool(client, private_cloud, location):\n return client.list(location, private_cloud)", "def api_get_regions():\n db_session = DBSession()\n\n rows = []\n criteria = '%'\n if request.args and request.args.get('q'):\n criteria += request.args.get('q') + '%'\n else:\n criteria += '%'\n\n regions = db_session.query(Region).filter(Region.name.like(criteria)).order_by(Region.name.asc()).all()\n if len(regions) > 0:\n if request.args.get('show_all'):\n rows.append({'id': 0, 'text': 'ALL'})\n for region in regions:\n rows.append({'id': region.id, 'text': region.name})\n\n return jsonify(**{'data': rows})", "def supports_catalog_lookup(self):\n # Implemented from kitosid template for -\n # osid.resource.ResourceProfile.supports_resource_lookup\n return self._provider_manager.supports_catalog_lookup()", "def test_list_cluster_resource_quota(self):\n pass", "def get_allocations(self):\n cursor = self.cur()\n cursor.execute('SELECT {col1}, {col2} FROM {tn}'.format(\n tn=\"allocation\", col1=\"room_name\", col2=\"person_id\"))\n allocations = cursor.fetchall()\n return allocations", "def set_resources():\n global available_resources\n global EdgenodeResources\n recv_json = request.get_json()\n for resourcename, value in recv_json.items():\n available_resources[resourcename] = value\n # TODO make this better\n EdgenodeResources = [TaskResources(ram=int(available_resources['RAM']), cpu=int(\n available_resources['CPU']), hdd=int(available_resources['HDD'])), available_resources['DATA']]\n\n print 'Available resources set to', EdgenodeResources\n return 'Available resources set to ' + str(available_resources)", "def avail_images(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"The avail_images function must be called with \"\n \"-f or --function, or with the --list-images option\"\n )\n\n ret = {}\n conn = get_conn()\n\n for item in conn.list_images()[\"items\"]:\n image = {\"id\": item[\"id\"]}\n image.update(item[\"properties\"])\n ret[image[\"name\"]] = image\n\n return ret", "def __init__(__self__,\n resource_name: str,\n args: 
DatabaseReplicaArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def test_show_available(self):\n database.import_data('csvs', 'product_data.csv', 'customer_data.csv', 'rentals_data.csv')\n actual_available = database.show_available_products()\n expected_available = {'prd001': {'description': 'TV', 'product_type': 'livingroom',\n 'quantity_available': '3'},\n 'prd002': {'description': 'Couch', 'product_type': 'livingroom',\n 'quantity_available': '1'}}\n self.assertEqual(actual_available, expected_available)\n database.delete_database()\n\n database.import_data('csvs', 'produc_data.csv', 'customer_data.csv', 'rentals_data.csv')\n database.delete_database()", "def get_available_agendas(self):\n pass", "def getcars():\n cars = Car.query.filter(Car.isavailable == True)\n result = carsSchema.dump(cars)\n print(result)\n return jsonify(result)", "def get_available_resources(threshold, usage, total):\n return dict((host, int(threshold * total[host] - resource))\n for host, resource in usage.items())", "def acquire(self):\n self.logger.debug(\"in NerscAllocationInfo acquire\")\n return {\"Nersc_Allocation_Info\": pd.DataFrame(self.send_query())}", "def test_index_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def api_db():\n return pymongo.MongoClient(SCITRAN_PERSISTENT_DB_URI).get_database()", "def _available_space( self, pool_name ):\n\t\ttry:\n\t\t\treturn self.storage_pools[pool_name].available\n\t\texcept KeyError:\n\t\t\treturn -1", "def get_instance_ram_allocated(self, resource, period,\n aggregate, granularity=None):\n pass", "def is_spatialized(resource):\n spatialized = False\n resource_id = resource['id']\n package_id=ckan_model.Resource.get(resource_id).resource_group.package_id\n package = ckan_model.Package.get(package_id)\n for resource in package.resources:\n if 'protocol' in resource.extras and 'parent_resource' in resource.extras:\n extras = resource.extras\n try:\n toolkit.get_action('resource_show')(None, { 'id':resource.id,'for-view':True })\n except (NotFound):\n continue\n\n if extras['parent_resource'] == resource_id\\\n and ( extras['protocol'].lower() == 'ogc:wms' or extras['ogc_type'].lower() == 'ogc:wfs'):\n print resource.state\n if resource.state !='active':\n return False\n spatialized = True\n break\n return spatialized", "def test_read_cluster_resource_quota_status(self):\n pass", "def count_orphan_resource_providers(db):\n sql = '''\\\n SELECT COUNT(*)\n FROM nova_api.resource_providers rp JOIN nova.compute_nodes cn\n ON cn.hypervisor_hostname = rp.name\n WHERE cn.deleted = 0\n AND rp.uuid != cn.uuid\n '''\n return db.query(sql)", "def get_available_images():\n return AVAILABLE_IMAGES", "def describe_rds_db_instances(StackId=None, RdsDbInstanceArns=None):\n pass", "def list_availability_definition(self):\n return self._get(path='availability')", "def spark_list(provider):\n api.available(provider)", "def print_available( self ):\n\n\t\tmax_length = 0\n\n\t\tfor key in self._available:\n\t\t\tmax_length = max( max_length, len( key ) )\n\n\t\tformat_str = 'API found: %%-%ds (%%s)' % max_length\n\n\t\tfor key in self._available:\n\t\t\tentry = self._available.get( key )\n\t\t\tprint( format_str % ( key, entry.get( 'path' ) ) )", "def get_oracle(verbosity, resultset, providerversion):\n try:\n response = requests.get(ORACLEAPIURL)\n if verbosity:\n print(response.status_code)\n if response.status_code == 200:\n cidrdata = json.loads(response.content)\n providerversion[\"ORACLE\"] = cidrdata[\"last_updated_timestamp\"]\n 
for i in range(0, len(cidrdata[\"regions\"])):\n for j in range(0, len(cidrdata[\"regions\"][i][\"cidrs\"])):\n if cidrdata[\"regions\"][i][\"cidrs\"][j][\"cidr\"] not in resultset:\n resultset[cidrdata[\"regions\"][i][\"cidrs\"][j][\"cidr\"]] = \"Oracle\"\n\n except Exception as get_exception:\n print(\"Exception\")\n print(get_exception)\n\n return resultset, providerversion", "def test_read_cluster_resource_quota(self):\n pass", "def test_load_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def validate_availability_zones(self, context, resource_type,\n availability_zones):", "def available(self):\n return self[\"available\"]", "def describe_availability_zones_with_options(\n self,\n request: dds_20151201_models.DescribeAvailabilityZonesRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeAvailabilityZonesResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.accept_language):\n query['AcceptLanguage'] = request.accept_language\n if not UtilClient.is_unset(request.db_type):\n query['DbType'] = request.db_type\n if not UtilClient.is_unset(request.exclude_secondary_zone_id):\n query['ExcludeSecondaryZoneId'] = request.exclude_secondary_zone_id\n if not UtilClient.is_unset(request.exclude_zone_id):\n query['ExcludeZoneId'] = request.exclude_zone_id\n if not UtilClient.is_unset(request.instance_charge_type):\n query['InstanceChargeType'] = request.instance_charge_type\n if not UtilClient.is_unset(request.mongo_type):\n query['MongoType'] = request.mongo_type\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.region_id):\n query['RegionId'] = request.region_id\n if not UtilClient.is_unset(request.resource_group_id):\n query['ResourceGroupId'] = request.resource_group_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.storage_support):\n query['StorageSupport'] = request.storage_support\n if not UtilClient.is_unset(request.storage_type):\n query['StorageType'] = request.storage_type\n if not UtilClient.is_unset(request.zone_id):\n query['ZoneId'] = request.zone_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeAvailabilityZones',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeAvailabilityZonesResponse(),\n self.call_api(params, req, runtime)\n )", "def ComputeEAvailable(self):\r\n pass", "def get_available_regions(service_name):\n session = boto3.session.Session()\n return session.get_available_regions(service_name)", "def get_available_regions(service_name):\n session = boto3.session.Session()\n return session.get_available_regions(service_name)", "def is_available(self):\n raise NotImplementedError", "def __init__(__self__,\n resource_name: str,\n args: DatabaseArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def 
__init__(__self__,\n resource_name: str,\n args: RegionPerInstanceConfigArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def Available(self) -> int:", "def Available(self) -> int:", "def Available(self) -> int:", "def migrate_available_zone(\n self,\n request: dds_20151201_models.MigrateAvailableZoneRequest,\n ) -> dds_20151201_models.MigrateAvailableZoneResponse:\n runtime = util_models.RuntimeOptions()\n return self.migrate_available_zone_with_options(request, runtime)", "def init_mongo_db():\n try:\n app.mongo.cx.server_info()\n app.mongo.db = app.mongo.cx[\"kamistudio\"]\n if \"kami_corpora\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_corpora\")\n app.mongo.db.kami_corpora.create_index(\"id\", unique=True)\n\n if \"kami_models\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_models\")\n app.mongo.db.kami_models.create_index(\"id\", unique=True)\n\n if \"kami_definitions\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_definitions\")\n app.mongo.db.kami_definitions.create_index(\"id\", unique=True)\n\n if \"kami_new_definitions\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_new_definitions\")\n\n except ServerSelectionTimeoutError as e:\n app.mongo.db = None", "def GetApiCollection(resource_type):\n return 'compute.' + resource_type", "def getStudyRegions():\n comp_name = os.environ['COMPUTERNAME']\n conn = py.connect('Driver=ODBC Driver 11 for SQL Server;SERVER=' +\n comp_name + '\\HAZUSPLUSSRVR; UID=SA;PWD=Gohazusplus_02')\n exclusionRows = ['master', 'tempdb', 'model', 'msdb', 'syHazus', 'CDMS', 'flTmpDB']\n cursor = conn.cursor()\n cursor.execute('SELECT [StateID] FROM [syHazus].[dbo].[syState]') \n for state in cursor:\n exclusionRows.append(state[0])\n cursor = conn.cursor()\n cursor.execute('SELECT * FROM sys.databases')\n studyRegions = []\n for row in cursor:\n if row[0] not in exclusionRows:\n studyRegions.append(row[0])\n studyRegions.sort(key=lambda x: x.lower())\n return studyRegions", "def open_db_connection():\n client = MongoClient() #'104.131.185.191', 27017\n db = client[\"225VOH\"]\n return client, db", "def show(collection, filter = {}):\n # creates a connection with database\n result = []\n myclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\n db = myclient[\"techstart\"]\n col = db[collection]\n for x in col.find(filter):\n result.append(x)\n return result", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n backend_type: Optional[pulumi.Input['InstanceBackendType']] = None,\n connection_name: Optional[pulumi.Input[str]] = None,\n current_disk_size: Optional[pulumi.Input[str]] = None,\n database_version: Optional[pulumi.Input['InstanceDatabaseVersion']] = None,\n disk_encryption_configuration: Optional[pulumi.Input[pulumi.InputType['DiskEncryptionConfigurationArgs']]] = None,\n disk_encryption_status: Optional[pulumi.Input[pulumi.InputType['DiskEncryptionStatusArgs']]] = None,\n etag: Optional[pulumi.Input[str]] = None,\n failover_replica: Optional[pulumi.Input[pulumi.InputType['InstanceFailoverReplicaArgs']]] = None,\n gce_zone: Optional[pulumi.Input[str]] = None,\n instance_type: Optional[pulumi.Input['InstanceInstanceType']] = None,\n ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpMappingArgs']]]]] = None,\n ipv6_address: Optional[pulumi.Input[str]] = None,\n kind: Optional[pulumi.Input[str]] = None,\n maintenance_version: 
Optional[pulumi.Input[str]] = None,\n master_instance_name: Optional[pulumi.Input[str]] = None,\n max_disk_size: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n on_premises_configuration: Optional[pulumi.Input[pulumi.InputType['OnPremisesConfigurationArgs']]] = None,\n out_of_disk_report: Optional[pulumi.Input[pulumi.InputType['SqlOutOfDiskReportArgs']]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n replica_configuration: Optional[pulumi.Input[pulumi.InputType['ReplicaConfigurationArgs']]] = None,\n replica_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n root_password: Optional[pulumi.Input[str]] = None,\n satisfies_pzs: Optional[pulumi.Input[bool]] = None,\n scheduled_maintenance: Optional[pulumi.Input[pulumi.InputType['SqlScheduledMaintenanceArgs']]] = None,\n secondary_gce_zone: Optional[pulumi.Input[str]] = None,\n self_link: Optional[pulumi.Input[str]] = None,\n server_ca_cert: Optional[pulumi.Input[pulumi.InputType['SslCertArgs']]] = None,\n service_account_email_address: Optional[pulumi.Input[str]] = None,\n settings: Optional[pulumi.Input[pulumi.InputType['SettingsArgs']]] = None,\n state: Optional[pulumi.Input['InstanceState']] = None,\n suspension_reason: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceSuspensionReasonItem']]]] = None,\n __props__=None):\n ...", "def db_connect():\n client = pymongo.MongoClient(get_project_settings().get(\"MONGO_URI\"))\n db = client.vehicles\n\n return db" ]
[ "0.6054931", "0.57605314", "0.5754169", "0.5718132", "0.569816", "0.5650721", "0.5415175", "0.5370477", "0.5326024", "0.5322731", "0.5281802", "0.52804214", "0.5266299", "0.52456385", "0.5227921", "0.5190761", "0.51283115", "0.510333", "0.5085657", "0.5081566", "0.5064062", "0.49869186", "0.49352258", "0.49311998", "0.49176314", "0.4913858", "0.4906032", "0.48790002", "0.48710015", "0.4863367", "0.4850168", "0.48474655", "0.4838957", "0.48194003", "0.48129132", "0.4794957", "0.47921702", "0.47857723", "0.4784148", "0.47825238", "0.4774589", "0.47563076", "0.47540623", "0.474862", "0.47458035", "0.47423044", "0.47076607", "0.46771416", "0.4675098", "0.46719494", "0.46611172", "0.46554068", "0.4649321", "0.4642971", "0.46228418", "0.46200344", "0.46186227", "0.46145114", "0.46130016", "0.460532", "0.4600478", "0.4599209", "0.45934206", "0.4591554", "0.4589859", "0.45587352", "0.45539317", "0.45369038", "0.45337924", "0.4533664", "0.45326778", "0.45323145", "0.45320418", "0.45291638", "0.45194122", "0.4509978", "0.45056704", "0.4498015", "0.44782332", "0.44690514", "0.44662988", "0.44638997", "0.44533837", "0.44426662", "0.44411948", "0.4424326", "0.4424326", "0.44095182", "0.44051018", "0.43952173", "0.43942183", "0.43942183", "0.43942183", "0.43940553", "0.43892556", "0.438718", "0.43838248", "0.43689787", "0.4364211", "0.43615255", "0.4356521" ]
0.0
-1
> To query available regions and zones where ApsaraDB for MongoDB instances can be created, call the [DescribeAvailableResource](~~149719~~) operation.
def describe_regions(
    self,
    request: dds_20151201_models.DescribeRegionsRequest,
) -> dds_20151201_models.DescribeRegionsResponse:
    runtime = util_models.RuntimeOptions()
    return self.describe_regions_with_options(request, runtime)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_available_resource(self, nodename):\n LOG.debug(\"get_available_resource\")\n\n dictval = self._host.properties\n\n return dictval", "def get_available_resource(self, nodename):\n if nodename not in self._drv_nodes:\n return {}\n supported_tuple = ('IA64', 'kvm', 'hvm')\n return {\n 'vcpus': drv_conf.max_vcpus,\n 'memory_mb': drv_conf.max_memory_mb,\n 'local_gb': drv_conf.max_disk_gb,\n 'vcpus_used': 0,\n 'memory_mb_used': 0,\n 'local_gb_used': 0,\n 'hypervisor_type': self.name,\n 'hypervisor_version': '1',\n 'hypervisor_hostname': nodename,\n 'disk_available_least': 0,\n 'cpu_info': '?',\n 'numa_topology': None,\n 'supported_instances': [supported_tuple]\n }", "def show_available_products():\n\n mongo = MongoDBConnection()\n result = {}\n\n with mongo:\n db = mongo.connection.HPNorton\n productcollection = db[\"products\"]\n for document in productcollection.find({\"quantity_available\": {\"$gt\": \"0\"}}):\n key = document['product_id']\n\n result[key] = {\n 'description': document['description'],\n 'product_type': document['product_type'],\n 'quantity_available': document['quantity_available']\n }\n\n return result", "def show_available_products():\n available_product = {}\n\n if not collection_exist(DATABASE, PRODUCT_COLLECTION):\n return available_product\n\n with MongoDBConnection() as mongo:\n database = mongo.connection[DATABASE]\n\n available_product__count = 0\n for product in database[PRODUCT_COLLECTION].find({\"quantity_available\": {\"$ne\": '0'}}):\n available_product[product['product_id']] = \\\n {'description': product['description'],\n 'product_type': product['product_type'],\n 'quantity_available': product['quantity_available']}\n available_product__count += 1\n\n return available_product__count", "def get_available_resource(self, nodename):\n curent_time = time.time()\n if curent_time - self.cleanup_time > CONF.azure.cleanup_span:\n self.cleanup_time = curent_time\n self._cleanup_deleted_os_disks()\n self._cleanup_deleted_nics()\n usage_family = 'basicAFamily'\n try:\n page = self.compute.usage.list(CONF.azure.location)\n except Exception as e:\n msg = six.text_type(e)\n LOG.exception(msg)\n ex = exception.ComputeUsageListFailure(reason=six.text_type(e))\n raise ex\n usages = [i for i in page]\n cores = 0\n cores_used = 0\n for i in usages:\n if hasattr(i, 'name') and hasattr(i.name, 'value'):\n if usage_family == i.name.value:\n cores = i.limit if hasattr(i, 'limit') else 0\n cores_used = i.current_value \\\n if hasattr(i, 'current_value') else 0\n break\n return {'vcpus': cores,\n 'memory_mb': 100000000,\n 'local_gb': 100000000,\n 'vcpus_used': cores_used,\n 'memory_mb_used': 0,\n 'local_gb_used': 0,\n 'hypervisor_type': hv_type.HYPERV,\n 'hypervisor_version': 300,\n 'hypervisor_hostname': nodename,\n 'cpu_info': '{\"model\": [\"Intel(R) Xeon(R) CPU E5-2670 0 @ '\n '2.60GHz\"], \"topology\": {\"cores\": 16, \"threads\": '\n '32}}',\n 'supported_instances': [(arch.I686, hv_type.HYPERV,\n vm_mode.HVM),\n (arch.X86_64, hv_type.HYPERV,\n vm_mode.HVM)],\n 'numa_topology': None\n }", "def show_available_products():\n LOGGER.debug('Listing all available products.')\n available_products = {}\n with MongoDBConnection() as mongo:\n database = mongo.connection.hp_norton\n for product in database.products.find(\n {'quantity_available': {'$gt': 0}}):\n available_products[product['product_id']] = {\n 'description': product['description'],\n 'product_type': product['product_type'],\n 'quantity_available': product['quantity_available']}\n return available_products", "def 
get_available_dbms(connection, error_msg=None):\n url = f\"{connection.base_url}/api/dbobjects/dbmss\"\n response = connection.session.get(url=url)\n if not response.ok:\n if error_msg is None:\n error_msg = \"Error getting available DBMSs\"\n response_handler(response, error_msg)\n return response", "def check_available():\n\n rm = current_app.config['rm_object']\n\n return rm.check_availability()", "def get_available_databases():\n\n available_databases = dict()\n all_databases = resource_keys('database', strip=[])\n for database in all_databases:\n try:\n database_entry_point = load_resource(database, 'database')\n\n available_databases[database] = dict()\n\n # Checking if the database has data for the ZT normalization\n available_databases[database][\"has_zt\"] = hasattr(database_entry_point, \"zobjects\") and hasattr(database_entry_point, \"tobjects\")\n available_databases[database][\"groups\"] = []\n # Searching for database groups\n try:\n groups = list(database_entry_point.groups()) or [\"dev\"]\n for g in [\"dev\", \"eval\"]:\n available_databases[database][\"groups\"] += [g] if g in groups else []\n except Exception:\n # In case the method groups is not implemented\n available_databases[database][\"groups\"] = [\"dev\"]\n except Exception:\n pass\n return available_databases", "def show_available_products(): # {{{\n products_available = {}\n try:\n with MONGO:\n product_collection = MONGO.connection.assignment_07[\"product\"].find(\n )\n\n for product in product_collection:\n if int(product[\"quantity_available\"]) > 0:\n products_available[product[\"product_id\"]] = {\n \"description\": product[\"description\"],\n \"product_type\": product[\"product_type\"],\n \"quantity_available\": product[\"quantity_available\"],\n }\n except TypeError as excep:\n LOGGER.warning(\"Error looking up available products\")\n LOGGER.warning(excep)\n else:\n if not products_available:\n LOGGER.info('No products found')\n else:\n LOGGER.info(\"Available products retrieved successfully.\")\n return products_available # }}}", "def compute_zones(self):\n path = '/os-availability-zone/detail'\n res = self.compute.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('Get openstack availability zone: %s' % truncate(res))\n return res[0]['availabilityZoneInfo']", "def show_available_products(*args):\n logger.info(f\"Preparing dict of available prodcuts...\")\n available_products = {}\n\n with MONGO:\n mdb = eval(Settings.connect_string)\n products = mdb[\"product\"]\n for doc in products.find():\n del doc[\"_id\"]\n if int(doc[\"quantity_available\"]) > 0:\n product_id = doc[\"product_id\"]\n del doc[\"product_id\"]\n available_products[product_id] = doc\n\n return available_products", "def _get_info_about_available_resources(self, min_ram, min_hdd, min_vcpus):\n vms_count = 0\n for hypervisor in self.nova_cli.hypervisors.list():\n if hypervisor.free_ram_mb >= min_ram:\n if hypervisor.free_disk_gb >= min_hdd:\n if hypervisor.vcpus - hypervisor.vcpus_used >= min_vcpus:\n # We need to determine how many VMs we can run\n # on this hypervisor\n free_cpu = hypervisor.vcpus - hypervisor.vcpus_used\n k1 = int(hypervisor.free_ram_mb / min_ram)\n k2 = int(hypervisor.free_disk_gb / min_hdd)\n k3 = int(free_cpu / min_vcpus)\n vms_count += min(k1, k2, k3)\n return vms_count", "def reserved_compare(options):\n running_instances = defaultdict(dict)\n reserved_purchases = defaultdict(dict)\n regions = boto.ec2.regions()\n good_regions = [r for r in regions if r.name not in 
['us-gov-west-1',\n 'cn-north-1']]\n for region in good_regions:\n if options.trace:\n print \" Scanning region {0}\".format(region.name)\n conn = region.connect()\n filters = {'instance-state-name': 'running'}\n zones = defaultdict(dict)\n\n if options.trace:\n print \" Fetching running instances\"\n reservations = conn.get_all_instances(filters=filters)\n for reservation in reservations:\n for inst in reservation.instances:\n if options.debug:\n print instance_string(inst, options, verbose=True)\n if inst.state != 'running':\n if options.debug:\n print \"Skip {0.id} state {0.state}\".format(inst)\n continue\n if inst.spot_instance_request_id:\n if options.debug:\n print \"Skip {0.id} has spot id {0.spot_instance_request_id}\".format(inst)\n continue\n if 'aws:autoscaling:groupName' in inst.tags:\n if options.debug:\n print \"Skip {0.id} is an autoscale instance\".format(inst)\n continue\n if inst.platform == 'Windows' or inst.platform == 'windows':\n if options.debug:\n print \"Skip {0.id} has platform {0.platform}\".format(inst)\n continue\n if inst.instance_type not in zones[inst.placement]:\n zones[inst.placement][inst.instance_type] = []\n zones[inst.placement][inst.instance_type].append(inst)\n\n if zones:\n running_instances[region.name] = zones\n\n purchased = defaultdict(dict)\n if options.trace:\n print \" Fetching reservations\"\n\n reserved = conn.get_all_reserved_instances()\n for r in reserved:\n if options.debug:\n print reservation_string(r, verbose=True)\n if r.state != 'active':\n continue\n if r.instance_tenancy != 'default':\n print 'WARNING: Non-default tenancy %s: %s' % (r.instance_tenancy, reservation_string(r))\n continue\n if r.instance_type not in purchased[r.availability_zone]:\n purchased[r.availability_zone][r.instance_type] = [r]\n else:\n purchased[r.availability_zone][r.instance_type].append(r)\n\n if purchased:\n reserved_purchases[region.name] = purchased\n\n return check_reservation_use(options, running_instances,\n reserved_purchases)", "def describe_availability_options(DomainName=None, Deployed=None):\n pass", "def show_available_products():\n products = DATABASE['product'].find({'quantity_available': {'$ne':'0'}})\n products_dict = {prod['product_id']:\n {'description': prod['description'],\n 'product_type': prod['product_type'],\n 'quantity_available': int(prod['quantity_available'])}\n for prod in products}\n return products_dict", "def get_available_db_drivers(connection, error_msg=None):\n url = f\"{connection.base_url}/api/dbobjects/drivers\"\n response = connection.session.get(url=url)\n if not response.ok:\n if error_msg is None:\n error_msg = \"Error getting available database drivers\"\n response_handler(response, error_msg)\n return response", "def try_atlas():\n result = {}\n try:\n vcap_services = os.getenv('VCAP_SERVICES')\n services = json.loads(vcap_services)\n for service_name in services.keys():\n print(f'service_name={service_name}')\n if service_name == \"_\":\n continue\n credentials = load_from_vcap_services(service_name)\n result.update(credentials)\n except Exception as err:\n print( f'Error looking for VCAP_SERVICES {err}')\n result['error']=err\n return result\n\n mongo_results = {}\n try:\n db = MongoClient( result['connectionString'] )\n mongo_results[\"MongoClient\"]= f'{db}'\n mongo_results[\"server_info\"]=db.server_info()\n except Exception as err:\n print( f'Error trying connection to Atlas: {err}')\n result['atlas-error']=err\n finally:\n result['mongo']=mongo_results\n\n return result", "def is_available(**kwargs: 
Any) -> bool:\n try:\n _check_available()\n except Unavailable as e:\n logger.info('Database not available: %s', e)\n return False\n return True", "def avail_locations(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"The avail_images function must be called with \"\n \"-f or --function, or with the --list-locations option\"\n )\n\n ret = {}\n conn = get_conn()\n\n for item in conn.list_locations()[\"items\"]:\n reg, loc = item[\"id\"].split(\"/\")\n location = {\"id\": item[\"id\"]}\n\n if reg not in ret:\n ret[reg] = {}\n\n ret[reg][loc] = location\n return ret", "def load_landkreis_information():\n client = MongoClient(f'mongodb://{os.getenv(\"USR_\")}:{os.getenv(\"PWD_\")}@{os.getenv(\"REMOTE_HOST\")}:{os.getenv(\"REMOTE_PORT\")}/{os.getenv(\"AUTH_DB\")}')\n db = client[os.getenv(\"MAIN_DB\")]\n lk_collection = db[\"lk_overview\"]\n data = pd.DataFrame(list(lk_collection.find()))\n return data", "def _check_available() -> None:\n current_session().query(\"1\").from_statement(text(\"SELECT 1\")).all()", "def avail_locations(session=None, call=None):\n # TODO: need to figure out a good meaning of locations in Xen\n if call == \"action\":\n raise SaltCloudException(\n \"The avail_locations function must be called with -f or --function.\"\n )\n return pool_list()", "def get_resource_available_in_dt_range(candidate_resources, dt_range,\n new_resource_occupations):\n for resource in candidate_resources:\n\n # Only occupations of current resource\n res_new_occupations = [y[1] for y in filter(\n lambda x: x[0] == clean_resource(resource),\n new_resource_occupations)]\n\n # Check availability\n availability = resource.get('availability')\n if (availability and not is_datetime_range_available(dt_range,\n availability)):\n continue\n\n # Check occupations\n occupations = resource.get('occupations', []) + res_new_occupations\n overlappings = [overlaps(dt_range, o) for o in occupations]\n if any(overlappings):\n continue\n\n return resource\n\n return None", "def test_aws_service_api_availability_zones_get(self):\n pass", "def oci_mysql_dbsystem_high_availability_check(cache, awsAccountId, awsRegion, awsPartition, ociTenancyId, ociUserId, ociRegionName, ociCompartments, ociUserApiKeyFingerprint):\n # ISO Time\n iso8601Time = datetime.datetime.now(datetime.timezone.utc).isoformat()\n for mysqldbs in get_mysql_db_systems(cache, ociTenancyId, ociUserId, ociRegionName, ociCompartments, ociUserApiKeyFingerprint):\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(mysqldbs,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n compartmentId = mysqldbs[\"compartment_id\"]\n mysqldbsId = mysqldbs[\"id\"]\n mysqldbsName = mysqldbs[\"display_name\"]\n lbLifecycleState = mysqldbs[\"lifecycle_state\"]\n createdAt = str(mysqldbs[\"time_created\"])\n\n if mysqldbs[\"is_highly_available\"] is False:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{mysqldbsId}/oci-mysql-dbs-high-availability-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{mysqldbsId}/oci-mysql-dbs-high-availability-check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"LOW\"},\n \"Confidence\": 99,\n \"Title\": 
\"[OCI.MySQLDatabaseService.7] MySQL Database Systems should be configured to be highly available\",\n \"Description\": f\"Oracle MySQL Database System {mysqldbsName} in Compartment {compartmentId} in {ociRegionName} is not highly available. A high availability DB system is made up of three MySQL instances: a primary instance and two secondary instances. Each MySQL instance utilizes the same amount of block volume storage, number of OCPUs, and amount of RAM defined in the shape chosen. The primary instance functions as a read/write endpoint and you have read/write access to the primary instance only. All data that you write to the primary instance is copied to the secondary instances asynchronously. The secondary instances are placed in different availability or fault domains. High availablility DB systems consume more resources (OCPUs, RAM, network bandwidth) than standalone DB systems. Hence the throughput and latency differ from the standalone DB systems. High availability uses MySQL Group Replication to replicate data from the primary instance to the secondary instances. The replication occurs over a secure, managed, internal network, unconnected to the VCN subnet you configured for the DB system. Limited information about this internal network is available in some Performance Schema tables, and you can neither connect to it nor view any other information related to it. Refer to the remediation instructions if this configuration is not intended.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on High Availability refer to the Overview of High Availability section of the Oracle Cloud Infrastructure Documentation for MySQL Database.\",\n \"Url\": \"https://docs.oracle.com/en-us/iaas/mysql-database/doc/overview-high-availability.html#GUID-0387FC6B-73DF-4447-A206-3CBA2EB0FFB3\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"OCI\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": ociTenancyId,\n \"AssetRegion\": ociRegionName,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Database\",\n \"AssetService\": \"Oracle MySQL Database Service\",\n \"AssetComponent\": \"Database System\"\n },\n \"Resources\": [\n {\n \"Type\": \"OciMySqlDatabaseServiceDatabaseSystem\",\n \"Id\": mysqldbsId,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"TenancyId\": ociTenancyId,\n \"CompartmentId\": compartmentId,\n \"Region\": ociRegionName,\n \"Name\": mysqldbsName,\n \"Id\": mysqldbsId,\n \"CreatedAt\": createdAt,\n \"LifecycleState\": lbLifecycleState\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 ID.BE-5\",\n \"NIST CSF V1.1 PR.DS-4\",\n \"NIST CSF V1.1 PR.PT-5\",\n \"NIST SP 800-53 Rev. 4 AU-4\",\n \"NIST SP 800-53 Rev. 4 CP-2\",\n \"NIST SP 800-53 Rev. 4 CP-7\",\n \"NIST SP 800-53 Rev. 4 CP-8\",\n \"NIST SP 800-53 Rev. 4 CP-11\",\n \"NIST SP 800-53 Rev. 4 CP-13\",\n \"NIST SP 800-53 Rev. 4 PL-8\",\n \"NIST SP 800-53 Rev. 4 SA-14\",\n \"NIST SP 800-53 Rev. 4 SC-5\",\n \"NIST SP 800-53 Rev. 
4 SC-6\",\n \"AICPA TSC CC3.1\",\n \"AICPA TSC A1.1\",\n \"AICPA TSC A1.2\",\n \"ISO 27001:2013 A.11.1.4\",\n \"ISO 27001:2013 A.12.3.1\",\n \"ISO 27001:2013 A.17.1.1\",\n \"ISO 27001:2013 A.17.1.2\",\n \"ISO 27001:2013 A.17.2.1\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{mysqldbsId}/oci-mysql-dbs-high-availability-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{mysqldbsId}/oci-mysql-dbs-high-availability-check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[OCI.MySQLDatabaseService.7] MySQL Database Systems should be configured to be highly available\",\n \"Description\": f\"Oracle MySQL Database System {mysqldbsName} in Compartment {compartmentId} in {ociRegionName} is highly available.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on Deletion Planning for Database Systems refer to the Advanced Option: Deletion Plan section of the Oracle Cloud Infrastructure Documentation for MySQL Database.\",\n \"Url\": \"https://docs.oracle.com/en-us/iaas/mysql-database/doc/advanced-options.html#MYAAS-GUID-29A995D2-1D40-4AE8-A654-FB6F40B07D85\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"OCI\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": ociTenancyId,\n \"AssetRegion\": ociRegionName,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Database\",\n \"AssetService\": \"Oracle MySQL Database Service\",\n \"AssetComponent\": \"Database System\"\n },\n \"Resources\": [\n {\n \"Type\": \"OciMySqlDatabaseServiceDatabaseSystem\",\n \"Id\": mysqldbsId,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"TenancyId\": ociTenancyId,\n \"CompartmentId\": compartmentId,\n \"Region\": ociRegionName,\n \"Name\": mysqldbsName,\n \"Id\": mysqldbsId,\n \"CreatedAt\": createdAt,\n \"LifecycleState\": lbLifecycleState\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 ID.BE-5\",\n \"NIST CSF V1.1 PR.DS-4\",\n \"NIST CSF V1.1 PR.PT-5\",\n \"NIST SP 800-53 Rev. 4 AU-4\",\n \"NIST SP 800-53 Rev. 4 CP-2\",\n \"NIST SP 800-53 Rev. 4 CP-7\",\n \"NIST SP 800-53 Rev. 4 CP-8\",\n \"NIST SP 800-53 Rev. 4 CP-11\",\n \"NIST SP 800-53 Rev. 4 CP-13\",\n \"NIST SP 800-53 Rev. 4 PL-8\",\n \"NIST SP 800-53 Rev. 4 SA-14\",\n \"NIST SP 800-53 Rev. 4 SC-5\",\n \"NIST SP 800-53 Rev. 
4 SC-6\",\n \"AICPA TSC CC3.1\",\n \"AICPA TSC A1.1\",\n \"AICPA TSC A1.2\",\n \"ISO 27001:2013 A.11.1.4\",\n \"ISO 27001:2013 A.12.3.1\",\n \"ISO 27001:2013 A.17.1.1\",\n \"ISO 27001:2013 A.17.1.2\",\n \"ISO 27001:2013 A.17.2.1\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding", "def test_get_cloud_resources(self):\n pass", "def update_available_resource(self, ctxt, host):\n LOG.debug(\"update_available_resource\")\n return", "def show_dbs(*dbs):\n if dbs:\n log.debug(\"get dbs from pillar: %s\", dbs)\n result = {}\n for db in dbs:\n result[db] = __salt__[\"pillar.get\"](\"oracle:dbs:\" + db)\n return result\n else:\n pillar_dbs = __salt__[\"pillar.get\"](\"oracle:dbs\")\n log.debug(\"get all (%s) dbs from pillar\", len(pillar_dbs))\n return pillar_dbs", "def is_available():", "def db_lookup(client):\n dblist_dict= client.get_list_database()\n # print(\"def db_lookup 010:\", dblist_dict)\n # print(\"def db_lookup 020:\", dblist_dict[3]['name'])\n # for element in dblist_dict:\n # print(\"db_lookup 3:\", element['name'])\n return dblist_dict", "def _update_available_resources(self, context):\n\n all_nodes = self.driver.get_available_nodes()\n all_rps = self.scheduler_client.reportclient\\\n .get_filtered_resource_providers({})\n node_uuids = [node.uuid for node in all_nodes]\n\n # Clean orphan resource providers in placement\n for rp in all_rps:\n if rp['uuid'] not in node_uuids:\n server_by_node = objects.Server.list(\n context, filters={'node_uuid': rp['uuid']})\n if server_by_node:\n continue\n self.scheduler_client.reportclient.delete_resource_provider(\n rp['uuid'])\n\n for node in all_nodes:\n if self.driver.is_node_consumable(node):\n self.scheduler_client.reportclient \\\n .delete_allocations_for_resource_provider(node.uuid)\n resource_class = sched_utils.ensure_resource_class_name(\n node.resource_class)\n inventory = self.driver.get_node_inventory(node)\n inventory_data = {resource_class: inventory}\n self.scheduler_client.set_inventory_for_provider(\n node.uuid, node.name or node.uuid, inventory_data,\n resource_class)", "def test_get_virtualization_realm_resources(self):\n pass", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n availability_zone: Optional[pulumi.Input[str]] = None,\n connection_string: Optional[pulumi.Input[str]] = None,\n create_sample_data: Optional[pulumi.Input[bool]] = None,\n db_instance_category: Optional[pulumi.Input[str]] = None,\n db_instance_class: Optional[pulumi.Input[str]] = None,\n db_instance_mode: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n encryption_key: Optional[pulumi.Input[str]] = None,\n encryption_type: Optional[pulumi.Input[str]] = None,\n engine: Optional[pulumi.Input[str]] = None,\n engine_version: Optional[pulumi.Input[str]] = None,\n instance_charge_type: Optional[pulumi.Input[str]] = None,\n instance_group_count: Optional[pulumi.Input[int]] = None,\n instance_network_type: Optional[pulumi.Input[str]] = None,\n instance_spec: Optional[pulumi.Input[str]] = None,\n ip_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InstanceIpWhitelistArgs']]]]] = None,\n maintain_end_time: Optional[pulumi.Input[str]] = None,\n maintain_start_time: Optional[pulumi.Input[str]] = None,\n master_node_num: Optional[pulumi.Input[int]] = None,\n payment_type: Optional[pulumi.Input[str]] = None,\n period: Optional[pulumi.Input[str]] = None,\n port: Optional[pulumi.Input[str]] = 
None,\n private_ip_address: Optional[pulumi.Input[str]] = None,\n resource_group_id: Optional[pulumi.Input[str]] = None,\n security_ip_lists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n seg_node_num: Optional[pulumi.Input[int]] = None,\n seg_storage_type: Optional[pulumi.Input[str]] = None,\n ssl_enabled: Optional[pulumi.Input[int]] = None,\n status: Optional[pulumi.Input[str]] = None,\n storage_size: Optional[pulumi.Input[int]] = None,\n tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n used_time: Optional[pulumi.Input[str]] = None,\n vector_configuration_status: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n vswitch_id: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None) -> 'Instance':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InstanceState.__new__(_InstanceState)\n\n __props__.__dict__[\"availability_zone\"] = availability_zone\n __props__.__dict__[\"connection_string\"] = connection_string\n __props__.__dict__[\"create_sample_data\"] = create_sample_data\n __props__.__dict__[\"db_instance_category\"] = db_instance_category\n __props__.__dict__[\"db_instance_class\"] = db_instance_class\n __props__.__dict__[\"db_instance_mode\"] = db_instance_mode\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"encryption_key\"] = encryption_key\n __props__.__dict__[\"encryption_type\"] = encryption_type\n __props__.__dict__[\"engine\"] = engine\n __props__.__dict__[\"engine_version\"] = engine_version\n __props__.__dict__[\"instance_charge_type\"] = instance_charge_type\n __props__.__dict__[\"instance_group_count\"] = instance_group_count\n __props__.__dict__[\"instance_network_type\"] = instance_network_type\n __props__.__dict__[\"instance_spec\"] = instance_spec\n __props__.__dict__[\"ip_whitelists\"] = ip_whitelists\n __props__.__dict__[\"maintain_end_time\"] = maintain_end_time\n __props__.__dict__[\"maintain_start_time\"] = maintain_start_time\n __props__.__dict__[\"master_node_num\"] = master_node_num\n __props__.__dict__[\"payment_type\"] = payment_type\n __props__.__dict__[\"period\"] = period\n __props__.__dict__[\"port\"] = port\n __props__.__dict__[\"private_ip_address\"] = private_ip_address\n __props__.__dict__[\"resource_group_id\"] = resource_group_id\n __props__.__dict__[\"security_ip_lists\"] = security_ip_lists\n __props__.__dict__[\"seg_node_num\"] = seg_node_num\n __props__.__dict__[\"seg_storage_type\"] = seg_storage_type\n __props__.__dict__[\"ssl_enabled\"] = ssl_enabled\n __props__.__dict__[\"status\"] = status\n __props__.__dict__[\"storage_size\"] = storage_size\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"used_time\"] = used_time\n __props__.__dict__[\"vector_configuration_status\"] = vector_configuration_status\n __props__.__dict__[\"vpc_id\"] = vpc_id\n __props__.__dict__[\"vswitch_id\"] = vswitch_id\n __props__.__dict__[\"zone_id\"] = zone_id\n return Instance(resource_name, opts=opts, __props__=__props__)", "def _get_available_regions():\n session = boto3.session.Session()\n\n return session.get_available_regions(service_name='s3')", "def ex_list_availability_zones(self, only_available=True):\n params = {'Action': 'DescribeAvailabilityZones'}\n\n if only_available:\n params.update({'Filter.0.Name': 'state'})\n params.update({'Filter.0.Value.0': 'available'})\n\n params.update({'Filter.1.Name': 'region-name'})\n params.update({'Filter.1.Value.0': self.region_name})\n\n result = 
self.connection.request(self.path,\n params=params.copy()).object\n\n availability_zones = []\n for element in self._findall(result, 'availabilityZoneInfo/item'):\n name = self._findtext(element, 'zoneName')\n zone_state = self._findtext(element, 'zoneState')\n region_name = self._findtext(element, 'regionName')\n\n availability_zone = ExEC2AvailabilityZone(\n name=name,\n zone_state=zone_state,\n region_name=region_name\n )\n availability_zones.append(availability_zone)\n\n return availability_zones", "def show_resource_pool(client, private_cloud, resource_pool, location):\n return client.get(location, private_cloud, resource_pool)", "def _get_available_region_options():\n available_regions = sorted(_get_available_regions())\n options = [ConfigurationOption(region, region) for region in available_regions]\n\n return options", "def get_availability_zones(self, context, filters=None, fields=None,\n sorts=None, limit=None, marker=None,\n page_reverse=False):", "def update_available_resource(self, ctxt, host):\n return", "def update_available_resource(self, context):\n # ask hypervisor for its view of resource availability &\n # usage:\n resources = self.driver.get_available_resource()\n if not resources:\n # The virt driver does not support this function\n LOG.warn(_(\"Virt driver does not support \"\n \"'get_available_resource' Compute tracking is disabled.\"))\n self.compute_node = None\n self.claims = {}\n return\n\n # Confirm resources dictionary contains expected keys:\n self._verify_resources(resources)\n\n resources['free_ram_mb'] = resources['memory_mb'] - \\\n resources['memory_mb_used']\n resources['free_disk_gb'] = resources['local_gb'] - \\\n resources['local_gb_used']\n\n LOG.audit(_(\"free_ram_mb: %s\") % resources['free_ram_mb'])\n LOG.audit(_(\"free_disk_gb: %s\") % resources['free_disk_gb'])\n # Apply resource claims representing in-progress operations to\n # 'resources'. This may over-estimate the amount of resources in use,\n # at least until the next time 'update_available_resource' runs.\n self._apply_claims(resources)\n\n # also generate all load stats:\n values = self._create_load_stats(context)\n resources.update(values)\n\n if not self.compute_node:\n # we need a copy of the ComputeNode record:\n service = self._get_service(context)\n if not service:\n # no service record, disable resource\n return\n\n compute_node_ref = service['compute_node']\n if compute_node_ref:\n self.compute_node = compute_node_ref[0]\n\n if not self.compute_node:\n # Need to create the ComputeNode record:\n resources['service_id'] = service['id']\n self.compute_node = self._create(context, resources)\n LOG.info(_('Compute_service record created for %s ') % self.host)\n\n else:\n # just update the record:\n self.compute_node = self._update(context, resources,\n prune_stats=True)\n LOG.info(_('Compute_service record updated for %s ') % self.host)", "def database_connectivity():\n\n port = \"mongodb://localhost:27017/\"\n client = pymongo.MongoClient(host=port)\n\n db = client[\"Password_Manager\"]\n collection = db[\"Passwords\"]\n\n return collection", "def get_available_databases():\n return map(\n lambda (key, value): (key, value[\"description\"]),\n DumpConverter.DATABASES.items())", "def databases(request): # pylint: disable=unused-argument\n response = InstancesAccess.get_all()\n # By default, JsonResponse refuse to serialize a list to a Json list. 
safe=False allow it.\n return JsonResponse(response, safe=False)", "def list_allocation_candidates(req):\n context = req.environ['placement.context']\n context.can(policies.LIST)\n want_version = req.environ[microversion.MICROVERSION_ENVIRON]\n get_schema = _get_schema(want_version)\n util.validate_query_params(req, get_schema)\n\n rqparams = lib.RequestWideParams.from_request(req)\n groups = lib.RequestGroup.dict_from_request(req, rqparams)\n\n if not rqparams.group_policy:\n # group_policy is required if more than one numbered request group was\n # specified.\n if len([rg for rg in groups.values() if rg.use_same_provider]) > 1:\n raise webob.exc.HTTPBadRequest(\n 'The \"group_policy\" parameter is required when specifying '\n 'more than one \"resources{N}\" parameter.')\n\n # We can't be aware of nested architecture with old microversions\n nested_aware = want_version.matches((1, 29))\n\n try:\n cands = ac_obj.AllocationCandidates.get_by_requests(\n context, groups, rqparams, nested_aware=nested_aware)\n except exception.ResourceClassNotFound as exc:\n raise webob.exc.HTTPBadRequest(\n 'Invalid resource class in resources parameter: %(error)s' %\n {'error': exc})\n except exception.TraitNotFound as exc:\n raise webob.exc.HTTPBadRequest(str(exc))\n\n response = req.response\n trx_cands = _transform_allocation_candidates(cands, groups, want_version)\n json_data = jsonutils.dumps(trx_cands)\n response.body = encodeutils.to_utf8(json_data)\n response.content_type = 'application/json'\n if want_version.matches((1, 15)):\n response.cache_control = 'no-cache'\n response.last_modified = timeutils.utcnow(with_timezone=True)\n return response", "def ip_allocations(self) -> Optional[Sequence['outputs.SubResourceResponse']]:\n return pulumi.get(self, \"ip_allocations\")", "def get_instances() -> dict:\n url = f\"{app.config['COMPUTE_SERVERS_REF']}/detail\"\n instances_rq = request(\n method=\"GET\", url=url, headers=build_header(), params={\"vm_state\": \"active\"},\n )\n\n if not instances_rq.ok:\n HTTPError(instances_rq.status_code)\n\n answer = {\"servers\": list()}\n for instance in instances_rq.json()[\"servers\"]:\n instance_info = dict(name=instance[\"name\"])\n instance_info[\"ip_addresses\"] = list()\n for network, info in instance[\"addresses\"].items():\n instance_info[\"ip_addresses\"].extend(entry[\"addr\"] for entry in info)\n answer[\"servers\"].append(instance_info)\n\n return answer", "def get_all_db_region(self, context):\n zone_objs = self.dns_manager.get_all_db_region(context)\n return zone_objs", "def check_available(rs):\n info = rs.get_cluster_info()\n if info['ClusterStatus'] == 'available':\n return True\n elif info['ClusterStatus'] == 'deleting':\n raise AttributeError(f'Cluster is currently deleting. Please wait and try again.\\n{info}')\n elif info['ClusterStatus'] == 'creating': \n return False\n \n raise NameError(f'Could not get cluster status information. Current information about the cluster: \\n{info}')", "def update_available_resource(self, context):\n new_resource_tracker_dict = {}\n\n compute_nodes_in_db = self._get_compute_nodes_in_db(context,\n use_slave=True)\n nodenames = set(self.driver.get_available_nodes())\n for nodename in nodenames:\n rt = self._get_resource_tracker(nodename)\n try:\n rt.update_available_resource(context)\n except exception.ComputeHostNotFound:\n # NOTE(comstud): We can get to this case if a node was\n # marked 'deleted' in the DB and then re-added with a\n # different auto-increment id. 
The cached resource\n # tracker tried to update a deleted record and failed.\n # Don't add this resource tracker to the new dict, so\n # that this will resolve itself on the next run.\n LOG.info(_LI(\"Compute node '%s' not found in \"\n \"update_available_resource.\"), nodename)\n continue\n except Exception:\n LOG.exception(_LE(\"Error updating resources for node \"\n \"%(node)s.\"), {'node': nodename})\n new_resource_tracker_dict[nodename] = rt\n\n # NOTE(comstud): Replace the RT cache before looping through\n # compute nodes to delete below, as we can end up doing greenthread\n # switches there. Best to have everyone using the newest cache\n # ASAP.\n self._resource_tracker_dict = new_resource_tracker_dict\n\n # Delete orphan compute node not reported by driver but still in db\n for cn in compute_nodes_in_db:\n if cn.hypervisor_hostname not in nodenames:\n LOG.info(_LI(\"Deleting orphan compute node %s\"), cn.id)\n cn.destroy()", "def avail(self, time, resource_group):\n a = set()\n for r in self.resource_group.resources:\n pass", "def list_rds(region, filter_by_kwargs):\n conn = boto.rds.connect_to_region(region)\n instances = conn.get_all_dbinstances()\n return lookup(instances, filter_by=filter_by_kwargs)", "def GetAvailabilityOfConnection(ConnectionInfo, StartDate, EndDate):\r\n\tVerkehrstageHex = ConnectionInfo[ConnInfoInd['trafficdays_hexcode']]\r\n\treturn GetAvailabilityBetweenDates(StartDate, EndDate, VerkehrstageHex)", "def list_resource_pool(client, private_cloud, location):\n return client.list(location, private_cloud)", "def api_get_regions():\n db_session = DBSession()\n\n rows = []\n criteria = '%'\n if request.args and request.args.get('q'):\n criteria += request.args.get('q') + '%'\n else:\n criteria += '%'\n\n regions = db_session.query(Region).filter(Region.name.like(criteria)).order_by(Region.name.asc()).all()\n if len(regions) > 0:\n if request.args.get('show_all'):\n rows.append({'id': 0, 'text': 'ALL'})\n for region in regions:\n rows.append({'id': region.id, 'text': region.name})\n\n return jsonify(**{'data': rows})", "def supports_catalog_lookup(self):\n # Implemented from kitosid template for -\n # osid.resource.ResourceProfile.supports_resource_lookup\n return self._provider_manager.supports_catalog_lookup()", "def test_list_cluster_resource_quota(self):\n pass", "def get_allocations(self):\n cursor = self.cur()\n cursor.execute('SELECT {col1}, {col2} FROM {tn}'.format(\n tn=\"allocation\", col1=\"room_name\", col2=\"person_id\"))\n allocations = cursor.fetchall()\n return allocations", "def set_resources():\n global available_resources\n global EdgenodeResources\n recv_json = request.get_json()\n for resourcename, value in recv_json.items():\n available_resources[resourcename] = value\n # TODO make this better\n EdgenodeResources = [TaskResources(ram=int(available_resources['RAM']), cpu=int(\n available_resources['CPU']), hdd=int(available_resources['HDD'])), available_resources['DATA']]\n\n print 'Available resources set to', EdgenodeResources\n return 'Available resources set to ' + str(available_resources)", "def avail_images(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"The avail_images function must be called with \"\n \"-f or --function, or with the --list-images option\"\n )\n\n ret = {}\n conn = get_conn()\n\n for item in conn.list_images()[\"items\"]:\n image = {\"id\": item[\"id\"]}\n image.update(item[\"properties\"])\n ret[image[\"name\"]] = image\n\n return ret", "def __init__(__self__,\n resource_name: str,\n args: 
DatabaseReplicaArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def test_show_available(self):\n database.import_data('csvs', 'product_data.csv', 'customer_data.csv', 'rentals_data.csv')\n actual_available = database.show_available_products()\n expected_available = {'prd001': {'description': 'TV', 'product_type': 'livingroom',\n 'quantity_available': '3'},\n 'prd002': {'description': 'Couch', 'product_type': 'livingroom',\n 'quantity_available': '1'}}\n self.assertEqual(actual_available, expected_available)\n database.delete_database()\n\n database.import_data('csvs', 'produc_data.csv', 'customer_data.csv', 'rentals_data.csv')\n database.delete_database()", "def get_available_agendas(self):\n pass", "def getcars():\n cars = Car.query.filter(Car.isavailable == True)\n result = carsSchema.dump(cars)\n print(result)\n return jsonify(result)", "def get_available_resources(threshold, usage, total):\n return dict((host, int(threshold * total[host] - resource))\n for host, resource in usage.items())", "def acquire(self):\n self.logger.debug(\"in NerscAllocationInfo acquire\")\n return {\"Nersc_Allocation_Info\": pd.DataFrame(self.send_query())}", "def test_index_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def api_db():\n return pymongo.MongoClient(SCITRAN_PERSISTENT_DB_URI).get_database()", "def get_instance_ram_allocated(self, resource, period,\n aggregate, granularity=None):\n pass", "def count_orphan_resource_providers(db):\n sql = '''\\\n SELECT COUNT(*)\n FROM nova_api.resource_providers rp JOIN nova.compute_nodes cn\n ON cn.hypervisor_hostname = rp.name\n WHERE cn.deleted = 0\n AND rp.uuid != cn.uuid\n '''\n return db.query(sql)", "def _available_space( self, pool_name ):\n\t\ttry:\n\t\t\treturn self.storage_pools[pool_name].available\n\t\texcept KeyError:\n\t\t\treturn -1", "def test_read_cluster_resource_quota_status(self):\n pass", "def is_spatialized(resource):\n spatialized = False\n resource_id = resource['id']\n package_id=ckan_model.Resource.get(resource_id).resource_group.package_id\n package = ckan_model.Package.get(package_id)\n for resource in package.resources:\n if 'protocol' in resource.extras and 'parent_resource' in resource.extras:\n extras = resource.extras\n try:\n toolkit.get_action('resource_show')(None, { 'id':resource.id,'for-view':True })\n except (NotFound):\n continue\n\n if extras['parent_resource'] == resource_id\\\n and ( extras['protocol'].lower() == 'ogc:wms' or extras['ogc_type'].lower() == 'ogc:wfs'):\n print resource.state\n if resource.state !='active':\n return False\n spatialized = True\n break\n return spatialized", "def get_available_images():\n return AVAILABLE_IMAGES", "def describe_rds_db_instances(StackId=None, RdsDbInstanceArns=None):\n pass", "def list_availability_definition(self):\n return self._get(path='availability')", "def spark_list(provider):\n api.available(provider)", "def print_available( self ):\n\n\t\tmax_length = 0\n\n\t\tfor key in self._available:\n\t\t\tmax_length = max( max_length, len( key ) )\n\n\t\tformat_str = 'API found: %%-%ds (%%s)' % max_length\n\n\t\tfor key in self._available:\n\t\t\tentry = self._available.get( key )\n\t\t\tprint( format_str % ( key, entry.get( 'path' ) ) )", "def get_oracle(verbosity, resultset, providerversion):\n try:\n response = requests.get(ORACLEAPIURL)\n if verbosity:\n print(response.status_code)\n if response.status_code == 200:\n cidrdata = json.loads(response.content)\n providerversion[\"ORACLE\"] = cidrdata[\"last_updated_timestamp\"]\n 
for i in range(0, len(cidrdata[\"regions\"])):\n for j in range(0, len(cidrdata[\"regions\"][i][\"cidrs\"])):\n if cidrdata[\"regions\"][i][\"cidrs\"][j][\"cidr\"] not in resultset:\n resultset[cidrdata[\"regions\"][i][\"cidrs\"][j][\"cidr\"]] = \"Oracle\"\n\n except Exception as get_exception:\n print(\"Exception\")\n print(get_exception)\n\n return resultset, providerversion", "def test_read_cluster_resource_quota(self):\n pass", "def test_load_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def validate_availability_zones(self, context, resource_type,\n availability_zones):", "def available(self):\n return self[\"available\"]", "def describe_availability_zones_with_options(\n self,\n request: dds_20151201_models.DescribeAvailabilityZonesRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeAvailabilityZonesResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.accept_language):\n query['AcceptLanguage'] = request.accept_language\n if not UtilClient.is_unset(request.db_type):\n query['DbType'] = request.db_type\n if not UtilClient.is_unset(request.exclude_secondary_zone_id):\n query['ExcludeSecondaryZoneId'] = request.exclude_secondary_zone_id\n if not UtilClient.is_unset(request.exclude_zone_id):\n query['ExcludeZoneId'] = request.exclude_zone_id\n if not UtilClient.is_unset(request.instance_charge_type):\n query['InstanceChargeType'] = request.instance_charge_type\n if not UtilClient.is_unset(request.mongo_type):\n query['MongoType'] = request.mongo_type\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.region_id):\n query['RegionId'] = request.region_id\n if not UtilClient.is_unset(request.resource_group_id):\n query['ResourceGroupId'] = request.resource_group_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.storage_support):\n query['StorageSupport'] = request.storage_support\n if not UtilClient.is_unset(request.storage_type):\n query['StorageType'] = request.storage_type\n if not UtilClient.is_unset(request.zone_id):\n query['ZoneId'] = request.zone_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeAvailabilityZones',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeAvailabilityZonesResponse(),\n self.call_api(params, req, runtime)\n )", "def ComputeEAvailable(self):\r\n pass", "def get_available_regions(service_name):\n session = boto3.session.Session()\n return session.get_available_regions(service_name)", "def get_available_regions(service_name):\n session = boto3.session.Session()\n return session.get_available_regions(service_name)", "def is_available(self):\n raise NotImplementedError", "def __init__(__self__,\n resource_name: str,\n args: DatabaseArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def 
__init__(__self__,\n resource_name: str,\n args: RegionPerInstanceConfigArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def migrate_available_zone(\n self,\n request: dds_20151201_models.MigrateAvailableZoneRequest,\n ) -> dds_20151201_models.MigrateAvailableZoneResponse:\n runtime = util_models.RuntimeOptions()\n return self.migrate_available_zone_with_options(request, runtime)", "def Available(self) -> int:", "def Available(self) -> int:", "def Available(self) -> int:", "def init_mongo_db():\n try:\n app.mongo.cx.server_info()\n app.mongo.db = app.mongo.cx[\"kamistudio\"]\n if \"kami_corpora\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_corpora\")\n app.mongo.db.kami_corpora.create_index(\"id\", unique=True)\n\n if \"kami_models\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_models\")\n app.mongo.db.kami_models.create_index(\"id\", unique=True)\n\n if \"kami_definitions\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_definitions\")\n app.mongo.db.kami_definitions.create_index(\"id\", unique=True)\n\n if \"kami_new_definitions\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_new_definitions\")\n\n except ServerSelectionTimeoutError as e:\n app.mongo.db = None", "def GetApiCollection(resource_type):\n return 'compute.' + resource_type", "def getStudyRegions():\n comp_name = os.environ['COMPUTERNAME']\n conn = py.connect('Driver=ODBC Driver 11 for SQL Server;SERVER=' +\n comp_name + '\\HAZUSPLUSSRVR; UID=SA;PWD=Gohazusplus_02')\n exclusionRows = ['master', 'tempdb', 'model', 'msdb', 'syHazus', 'CDMS', 'flTmpDB']\n cursor = conn.cursor()\n cursor.execute('SELECT [StateID] FROM [syHazus].[dbo].[syState]') \n for state in cursor:\n exclusionRows.append(state[0])\n cursor = conn.cursor()\n cursor.execute('SELECT * FROM sys.databases')\n studyRegions = []\n for row in cursor:\n if row[0] not in exclusionRows:\n studyRegions.append(row[0])\n studyRegions.sort(key=lambda x: x.lower())\n return studyRegions", "def open_db_connection():\n client = MongoClient() #'104.131.185.191', 27017\n db = client[\"225VOH\"]\n return client, db", "def show(collection, filter = {}):\n # creates a connection with database\n result = []\n myclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\n db = myclient[\"techstart\"]\n col = db[collection]\n for x in col.find(filter):\n result.append(x)\n return result", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n backend_type: Optional[pulumi.Input['InstanceBackendType']] = None,\n connection_name: Optional[pulumi.Input[str]] = None,\n current_disk_size: Optional[pulumi.Input[str]] = None,\n database_version: Optional[pulumi.Input['InstanceDatabaseVersion']] = None,\n disk_encryption_configuration: Optional[pulumi.Input[pulumi.InputType['DiskEncryptionConfigurationArgs']]] = None,\n disk_encryption_status: Optional[pulumi.Input[pulumi.InputType['DiskEncryptionStatusArgs']]] = None,\n etag: Optional[pulumi.Input[str]] = None,\n failover_replica: Optional[pulumi.Input[pulumi.InputType['InstanceFailoverReplicaArgs']]] = None,\n gce_zone: Optional[pulumi.Input[str]] = None,\n instance_type: Optional[pulumi.Input['InstanceInstanceType']] = None,\n ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpMappingArgs']]]]] = None,\n ipv6_address: Optional[pulumi.Input[str]] = None,\n kind: Optional[pulumi.Input[str]] = None,\n maintenance_version: 
Optional[pulumi.Input[str]] = None,\n master_instance_name: Optional[pulumi.Input[str]] = None,\n max_disk_size: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n on_premises_configuration: Optional[pulumi.Input[pulumi.InputType['OnPremisesConfigurationArgs']]] = None,\n out_of_disk_report: Optional[pulumi.Input[pulumi.InputType['SqlOutOfDiskReportArgs']]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n replica_configuration: Optional[pulumi.Input[pulumi.InputType['ReplicaConfigurationArgs']]] = None,\n replica_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n root_password: Optional[pulumi.Input[str]] = None,\n satisfies_pzs: Optional[pulumi.Input[bool]] = None,\n scheduled_maintenance: Optional[pulumi.Input[pulumi.InputType['SqlScheduledMaintenanceArgs']]] = None,\n secondary_gce_zone: Optional[pulumi.Input[str]] = None,\n self_link: Optional[pulumi.Input[str]] = None,\n server_ca_cert: Optional[pulumi.Input[pulumi.InputType['SslCertArgs']]] = None,\n service_account_email_address: Optional[pulumi.Input[str]] = None,\n settings: Optional[pulumi.Input[pulumi.InputType['SettingsArgs']]] = None,\n state: Optional[pulumi.Input['InstanceState']] = None,\n suspension_reason: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceSuspensionReasonItem']]]] = None,\n __props__=None):\n ...", "def db_connect():\n client = pymongo.MongoClient(get_project_settings().get(\"MONGO_URI\"))\n db = client.vehicles\n\n return db" ]
[ "0.60528797", "0.5757612", "0.575394", "0.57174635", "0.5696347", "0.56500214", "0.5415369", "0.5368649", "0.53264123", "0.5321517", "0.5281705", "0.5280499", "0.52654624", "0.52461064", "0.5228316", "0.5189627", "0.512781", "0.5104357", "0.5083543", "0.5081698", "0.50655764", "0.49858958", "0.49348488", "0.49292126", "0.49187896", "0.49132764", "0.49063373", "0.48762184", "0.48728195", "0.48615557", "0.48489046", "0.4848287", "0.48400953", "0.48184758", "0.48131365", "0.4794416", "0.479214", "0.47865447", "0.4784897", "0.47799528", "0.47710806", "0.47586882", "0.47554076", "0.47501436", "0.47453886", "0.4743101", "0.47086695", "0.46795866", "0.4673606", "0.46689352", "0.4660352", "0.46562693", "0.46488604", "0.46428424", "0.46238327", "0.46195608", "0.46184093", "0.46152377", "0.46110696", "0.46049273", "0.46000922", "0.4597859", "0.45949218", "0.45927447", "0.45877847", "0.45582354", "0.45544615", "0.45386866", "0.45332938", "0.4532112", "0.4531858", "0.4531776", "0.45316157", "0.4528408", "0.45205277", "0.4509386", "0.45054537", "0.44975087", "0.44778657", "0.44683605", "0.44667342", "0.4464291", "0.44513994", "0.4443954", "0.44383284", "0.44244188", "0.44244188", "0.4407312", "0.44047502", "0.43948585", "0.43936852", "0.4392813", "0.4392813", "0.4392813", "0.43910906", "0.43882194", "0.43856895", "0.43706858", "0.43668258", "0.4360832", "0.43587157" ]
0.0
-1
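The row above (and the one that follows) documents the synchronous and asynchronous `describe_regions` wrappers of the ApsaraDB for MongoDB (DDS 2015-12-01) SDK client. As a rough, non-authoritative sketch of how the synchronous wrapper might be invoked, the snippet below builds a client and sends an empty DescribeRegions request; the package paths `alibabacloud_dds20151201` / `alibabacloud_tea_openapi`, the endpoint value, and the credential placeholders are assumptions, not values taken from these dataset rows.

# Minimal usage sketch. Assumptions: standard alibabacloud_dds20151201 /
# alibabacloud_tea_openapi package layout, a placeholder endpoint, and
# placeholder credentials; none of these come from the dataset itself.
from alibabacloud_dds20151201.client import Client as DdsClient
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_tea_openapi import models as open_api_models

# Build a client; replace the placeholders with real credentials and endpoint.
config = open_api_models.Config(
    access_key_id='<your-access-key-id>',
    access_key_secret='<your-access-key-secret>',
    endpoint='mongodb.aliyuncs.com',  # assumed service endpoint
)
client = DdsClient(config)

# An empty request lists the regions visible to the account.
request = dds_20151201_models.DescribeRegionsRequest()
response = client.describe_regions(request)
print(response.body)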
> To query available regions and zones where ApsaraDB for MongoDB instances can be created, call the [DescribeAvailableResource](~~149719~~) operation.
async def describe_regions_async(
    self,
    request: dds_20151201_models.DescribeRegionsRequest,
) -> dds_20151201_models.DescribeRegionsResponse:
    runtime = util_models.RuntimeOptions()
    return await self.describe_regions_with_options_async(request, runtime)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_available_resource(self, nodename):\n LOG.debug(\"get_available_resource\")\n\n dictval = self._host.properties\n\n return dictval", "def get_available_resource(self, nodename):\n if nodename not in self._drv_nodes:\n return {}\n supported_tuple = ('IA64', 'kvm', 'hvm')\n return {\n 'vcpus': drv_conf.max_vcpus,\n 'memory_mb': drv_conf.max_memory_mb,\n 'local_gb': drv_conf.max_disk_gb,\n 'vcpus_used': 0,\n 'memory_mb_used': 0,\n 'local_gb_used': 0,\n 'hypervisor_type': self.name,\n 'hypervisor_version': '1',\n 'hypervisor_hostname': nodename,\n 'disk_available_least': 0,\n 'cpu_info': '?',\n 'numa_topology': None,\n 'supported_instances': [supported_tuple]\n }", "def show_available_products():\n\n mongo = MongoDBConnection()\n result = {}\n\n with mongo:\n db = mongo.connection.HPNorton\n productcollection = db[\"products\"]\n for document in productcollection.find({\"quantity_available\": {\"$gt\": \"0\"}}):\n key = document['product_id']\n\n result[key] = {\n 'description': document['description'],\n 'product_type': document['product_type'],\n 'quantity_available': document['quantity_available']\n }\n\n return result", "def show_available_products():\n available_product = {}\n\n if not collection_exist(DATABASE, PRODUCT_COLLECTION):\n return available_product\n\n with MongoDBConnection() as mongo:\n database = mongo.connection[DATABASE]\n\n available_product__count = 0\n for product in database[PRODUCT_COLLECTION].find({\"quantity_available\": {\"$ne\": '0'}}):\n available_product[product['product_id']] = \\\n {'description': product['description'],\n 'product_type': product['product_type'],\n 'quantity_available': product['quantity_available']}\n available_product__count += 1\n\n return available_product__count", "def get_available_resource(self, nodename):\n curent_time = time.time()\n if curent_time - self.cleanup_time > CONF.azure.cleanup_span:\n self.cleanup_time = curent_time\n self._cleanup_deleted_os_disks()\n self._cleanup_deleted_nics()\n usage_family = 'basicAFamily'\n try:\n page = self.compute.usage.list(CONF.azure.location)\n except Exception as e:\n msg = six.text_type(e)\n LOG.exception(msg)\n ex = exception.ComputeUsageListFailure(reason=six.text_type(e))\n raise ex\n usages = [i for i in page]\n cores = 0\n cores_used = 0\n for i in usages:\n if hasattr(i, 'name') and hasattr(i.name, 'value'):\n if usage_family == i.name.value:\n cores = i.limit if hasattr(i, 'limit') else 0\n cores_used = i.current_value \\\n if hasattr(i, 'current_value') else 0\n break\n return {'vcpus': cores,\n 'memory_mb': 100000000,\n 'local_gb': 100000000,\n 'vcpus_used': cores_used,\n 'memory_mb_used': 0,\n 'local_gb_used': 0,\n 'hypervisor_type': hv_type.HYPERV,\n 'hypervisor_version': 300,\n 'hypervisor_hostname': nodename,\n 'cpu_info': '{\"model\": [\"Intel(R) Xeon(R) CPU E5-2670 0 @ '\n '2.60GHz\"], \"topology\": {\"cores\": 16, \"threads\": '\n '32}}',\n 'supported_instances': [(arch.I686, hv_type.HYPERV,\n vm_mode.HVM),\n (arch.X86_64, hv_type.HYPERV,\n vm_mode.HVM)],\n 'numa_topology': None\n }", "def show_available_products():\n LOGGER.debug('Listing all available products.')\n available_products = {}\n with MongoDBConnection() as mongo:\n database = mongo.connection.hp_norton\n for product in database.products.find(\n {'quantity_available': {'$gt': 0}}):\n available_products[product['product_id']] = {\n 'description': product['description'],\n 'product_type': product['product_type'],\n 'quantity_available': product['quantity_available']}\n return available_products", "def 
get_available_dbms(connection, error_msg=None):\n url = f\"{connection.base_url}/api/dbobjects/dbmss\"\n response = connection.session.get(url=url)\n if not response.ok:\n if error_msg is None:\n error_msg = \"Error getting available DBMSs\"\n response_handler(response, error_msg)\n return response", "def check_available():\n\n rm = current_app.config['rm_object']\n\n return rm.check_availability()", "def get_available_databases():\n\n available_databases = dict()\n all_databases = resource_keys('database', strip=[])\n for database in all_databases:\n try:\n database_entry_point = load_resource(database, 'database')\n\n available_databases[database] = dict()\n\n # Checking if the database has data for the ZT normalization\n available_databases[database][\"has_zt\"] = hasattr(database_entry_point, \"zobjects\") and hasattr(database_entry_point, \"tobjects\")\n available_databases[database][\"groups\"] = []\n # Searching for database groups\n try:\n groups = list(database_entry_point.groups()) or [\"dev\"]\n for g in [\"dev\", \"eval\"]:\n available_databases[database][\"groups\"] += [g] if g in groups else []\n except Exception:\n # In case the method groups is not implemented\n available_databases[database][\"groups\"] = [\"dev\"]\n except Exception:\n pass\n return available_databases", "def show_available_products(): # {{{\n products_available = {}\n try:\n with MONGO:\n product_collection = MONGO.connection.assignment_07[\"product\"].find(\n )\n\n for product in product_collection:\n if int(product[\"quantity_available\"]) > 0:\n products_available[product[\"product_id\"]] = {\n \"description\": product[\"description\"],\n \"product_type\": product[\"product_type\"],\n \"quantity_available\": product[\"quantity_available\"],\n }\n except TypeError as excep:\n LOGGER.warning(\"Error looking up available products\")\n LOGGER.warning(excep)\n else:\n if not products_available:\n LOGGER.info('No products found')\n else:\n LOGGER.info(\"Available products retrieved successfully.\")\n return products_available # }}}", "def show_available_products(*args):\n logger.info(f\"Preparing dict of available prodcuts...\")\n available_products = {}\n\n with MONGO:\n mdb = eval(Settings.connect_string)\n products = mdb[\"product\"]\n for doc in products.find():\n del doc[\"_id\"]\n if int(doc[\"quantity_available\"]) > 0:\n product_id = doc[\"product_id\"]\n del doc[\"product_id\"]\n available_products[product_id] = doc\n\n return available_products", "def compute_zones(self):\n path = '/os-availability-zone/detail'\n res = self.compute.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('Get openstack availability zone: %s' % truncate(res))\n return res[0]['availabilityZoneInfo']", "def _get_info_about_available_resources(self, min_ram, min_hdd, min_vcpus):\n vms_count = 0\n for hypervisor in self.nova_cli.hypervisors.list():\n if hypervisor.free_ram_mb >= min_ram:\n if hypervisor.free_disk_gb >= min_hdd:\n if hypervisor.vcpus - hypervisor.vcpus_used >= min_vcpus:\n # We need to determine how many VMs we can run\n # on this hypervisor\n free_cpu = hypervisor.vcpus - hypervisor.vcpus_used\n k1 = int(hypervisor.free_ram_mb / min_ram)\n k2 = int(hypervisor.free_disk_gb / min_hdd)\n k3 = int(free_cpu / min_vcpus)\n vms_count += min(k1, k2, k3)\n return vms_count", "def reserved_compare(options):\n running_instances = defaultdict(dict)\n reserved_purchases = defaultdict(dict)\n regions = boto.ec2.regions()\n good_regions = [r for r in regions if r.name not in 
['us-gov-west-1',\n 'cn-north-1']]\n for region in good_regions:\n if options.trace:\n print \" Scanning region {0}\".format(region.name)\n conn = region.connect()\n filters = {'instance-state-name': 'running'}\n zones = defaultdict(dict)\n\n if options.trace:\n print \" Fetching running instances\"\n reservations = conn.get_all_instances(filters=filters)\n for reservation in reservations:\n for inst in reservation.instances:\n if options.debug:\n print instance_string(inst, options, verbose=True)\n if inst.state != 'running':\n if options.debug:\n print \"Skip {0.id} state {0.state}\".format(inst)\n continue\n if inst.spot_instance_request_id:\n if options.debug:\n print \"Skip {0.id} has spot id {0.spot_instance_request_id}\".format(inst)\n continue\n if 'aws:autoscaling:groupName' in inst.tags:\n if options.debug:\n print \"Skip {0.id} is an autoscale instance\".format(inst)\n continue\n if inst.platform == 'Windows' or inst.platform == 'windows':\n if options.debug:\n print \"Skip {0.id} has platform {0.platform}\".format(inst)\n continue\n if inst.instance_type not in zones[inst.placement]:\n zones[inst.placement][inst.instance_type] = []\n zones[inst.placement][inst.instance_type].append(inst)\n\n if zones:\n running_instances[region.name] = zones\n\n purchased = defaultdict(dict)\n if options.trace:\n print \" Fetching reservations\"\n\n reserved = conn.get_all_reserved_instances()\n for r in reserved:\n if options.debug:\n print reservation_string(r, verbose=True)\n if r.state != 'active':\n continue\n if r.instance_tenancy != 'default':\n print 'WARNING: Non-default tenancy %s: %s' % (r.instance_tenancy, reservation_string(r))\n continue\n if r.instance_type not in purchased[r.availability_zone]:\n purchased[r.availability_zone][r.instance_type] = [r]\n else:\n purchased[r.availability_zone][r.instance_type].append(r)\n\n if purchased:\n reserved_purchases[region.name] = purchased\n\n return check_reservation_use(options, running_instances,\n reserved_purchases)", "def describe_availability_options(DomainName=None, Deployed=None):\n pass", "def show_available_products():\n products = DATABASE['product'].find({'quantity_available': {'$ne':'0'}})\n products_dict = {prod['product_id']:\n {'description': prod['description'],\n 'product_type': prod['product_type'],\n 'quantity_available': int(prod['quantity_available'])}\n for prod in products}\n return products_dict", "def get_available_db_drivers(connection, error_msg=None):\n url = f\"{connection.base_url}/api/dbobjects/drivers\"\n response = connection.session.get(url=url)\n if not response.ok:\n if error_msg is None:\n error_msg = \"Error getting available database drivers\"\n response_handler(response, error_msg)\n return response", "def try_atlas():\n result = {}\n try:\n vcap_services = os.getenv('VCAP_SERVICES')\n services = json.loads(vcap_services)\n for service_name in services.keys():\n print(f'service_name={service_name}')\n if service_name == \"_\":\n continue\n credentials = load_from_vcap_services(service_name)\n result.update(credentials)\n except Exception as err:\n print( f'Error looking for VCAP_SERVICES {err}')\n result['error']=err\n return result\n\n mongo_results = {}\n try:\n db = MongoClient( result['connectionString'] )\n mongo_results[\"MongoClient\"]= f'{db}'\n mongo_results[\"server_info\"]=db.server_info()\n except Exception as err:\n print( f'Error trying connection to Atlas: {err}')\n result['atlas-error']=err\n finally:\n result['mongo']=mongo_results\n\n return result", "def is_available(**kwargs: 
Any) -> bool:\n try:\n _check_available()\n except Unavailable as e:\n logger.info('Database not available: %s', e)\n return False\n return True", "def avail_locations(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"The avail_images function must be called with \"\n \"-f or --function, or with the --list-locations option\"\n )\n\n ret = {}\n conn = get_conn()\n\n for item in conn.list_locations()[\"items\"]:\n reg, loc = item[\"id\"].split(\"/\")\n location = {\"id\": item[\"id\"]}\n\n if reg not in ret:\n ret[reg] = {}\n\n ret[reg][loc] = location\n return ret", "def load_landkreis_information():\n client = MongoClient(f'mongodb://{os.getenv(\"USR_\")}:{os.getenv(\"PWD_\")}@{os.getenv(\"REMOTE_HOST\")}:{os.getenv(\"REMOTE_PORT\")}/{os.getenv(\"AUTH_DB\")}')\n db = client[os.getenv(\"MAIN_DB\")]\n lk_collection = db[\"lk_overview\"]\n data = pd.DataFrame(list(lk_collection.find()))\n return data", "def _check_available() -> None:\n current_session().query(\"1\").from_statement(text(\"SELECT 1\")).all()", "def avail_locations(session=None, call=None):\n # TODO: need to figure out a good meaning of locations in Xen\n if call == \"action\":\n raise SaltCloudException(\n \"The avail_locations function must be called with -f or --function.\"\n )\n return pool_list()", "def get_resource_available_in_dt_range(candidate_resources, dt_range,\n new_resource_occupations):\n for resource in candidate_resources:\n\n # Only occupations of current resource\n res_new_occupations = [y[1] for y in filter(\n lambda x: x[0] == clean_resource(resource),\n new_resource_occupations)]\n\n # Check availability\n availability = resource.get('availability')\n if (availability and not is_datetime_range_available(dt_range,\n availability)):\n continue\n\n # Check occupations\n occupations = resource.get('occupations', []) + res_new_occupations\n overlappings = [overlaps(dt_range, o) for o in occupations]\n if any(overlappings):\n continue\n\n return resource\n\n return None", "def test_aws_service_api_availability_zones_get(self):\n pass", "def oci_mysql_dbsystem_high_availability_check(cache, awsAccountId, awsRegion, awsPartition, ociTenancyId, ociUserId, ociRegionName, ociCompartments, ociUserApiKeyFingerprint):\n # ISO Time\n iso8601Time = datetime.datetime.now(datetime.timezone.utc).isoformat()\n for mysqldbs in get_mysql_db_systems(cache, ociTenancyId, ociUserId, ociRegionName, ociCompartments, ociUserApiKeyFingerprint):\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(mysqldbs,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n compartmentId = mysqldbs[\"compartment_id\"]\n mysqldbsId = mysqldbs[\"id\"]\n mysqldbsName = mysqldbs[\"display_name\"]\n lbLifecycleState = mysqldbs[\"lifecycle_state\"]\n createdAt = str(mysqldbs[\"time_created\"])\n\n if mysqldbs[\"is_highly_available\"] is False:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{mysqldbsId}/oci-mysql-dbs-high-availability-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{mysqldbsId}/oci-mysql-dbs-high-availability-check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"LOW\"},\n \"Confidence\": 99,\n \"Title\": 
\"[OCI.MySQLDatabaseService.7] MySQL Database Systems should be configured to be highly available\",\n \"Description\": f\"Oracle MySQL Database System {mysqldbsName} in Compartment {compartmentId} in {ociRegionName} is not highly available. A high availability DB system is made up of three MySQL instances: a primary instance and two secondary instances. Each MySQL instance utilizes the same amount of block volume storage, number of OCPUs, and amount of RAM defined in the shape chosen. The primary instance functions as a read/write endpoint and you have read/write access to the primary instance only. All data that you write to the primary instance is copied to the secondary instances asynchronously. The secondary instances are placed in different availability or fault domains. High availablility DB systems consume more resources (OCPUs, RAM, network bandwidth) than standalone DB systems. Hence the throughput and latency differ from the standalone DB systems. High availability uses MySQL Group Replication to replicate data from the primary instance to the secondary instances. The replication occurs over a secure, managed, internal network, unconnected to the VCN subnet you configured for the DB system. Limited information about this internal network is available in some Performance Schema tables, and you can neither connect to it nor view any other information related to it. Refer to the remediation instructions if this configuration is not intended.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on High Availability refer to the Overview of High Availability section of the Oracle Cloud Infrastructure Documentation for MySQL Database.\",\n \"Url\": \"https://docs.oracle.com/en-us/iaas/mysql-database/doc/overview-high-availability.html#GUID-0387FC6B-73DF-4447-A206-3CBA2EB0FFB3\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"OCI\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": ociTenancyId,\n \"AssetRegion\": ociRegionName,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Database\",\n \"AssetService\": \"Oracle MySQL Database Service\",\n \"AssetComponent\": \"Database System\"\n },\n \"Resources\": [\n {\n \"Type\": \"OciMySqlDatabaseServiceDatabaseSystem\",\n \"Id\": mysqldbsId,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"TenancyId\": ociTenancyId,\n \"CompartmentId\": compartmentId,\n \"Region\": ociRegionName,\n \"Name\": mysqldbsName,\n \"Id\": mysqldbsId,\n \"CreatedAt\": createdAt,\n \"LifecycleState\": lbLifecycleState\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 ID.BE-5\",\n \"NIST CSF V1.1 PR.DS-4\",\n \"NIST CSF V1.1 PR.PT-5\",\n \"NIST SP 800-53 Rev. 4 AU-4\",\n \"NIST SP 800-53 Rev. 4 CP-2\",\n \"NIST SP 800-53 Rev. 4 CP-7\",\n \"NIST SP 800-53 Rev. 4 CP-8\",\n \"NIST SP 800-53 Rev. 4 CP-11\",\n \"NIST SP 800-53 Rev. 4 CP-13\",\n \"NIST SP 800-53 Rev. 4 PL-8\",\n \"NIST SP 800-53 Rev. 4 SA-14\",\n \"NIST SP 800-53 Rev. 4 SC-5\",\n \"NIST SP 800-53 Rev. 
4 SC-6\",\n \"AICPA TSC CC3.1\",\n \"AICPA TSC A1.1\",\n \"AICPA TSC A1.2\",\n \"ISO 27001:2013 A.11.1.4\",\n \"ISO 27001:2013 A.12.3.1\",\n \"ISO 27001:2013 A.17.1.1\",\n \"ISO 27001:2013 A.17.1.2\",\n \"ISO 27001:2013 A.17.2.1\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{mysqldbsId}/oci-mysql-dbs-high-availability-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{mysqldbsId}/oci-mysql-dbs-high-availability-check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[OCI.MySQLDatabaseService.7] MySQL Database Systems should be configured to be highly available\",\n \"Description\": f\"Oracle MySQL Database System {mysqldbsName} in Compartment {compartmentId} in {ociRegionName} is highly available.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on Deletion Planning for Database Systems refer to the Advanced Option: Deletion Plan section of the Oracle Cloud Infrastructure Documentation for MySQL Database.\",\n \"Url\": \"https://docs.oracle.com/en-us/iaas/mysql-database/doc/advanced-options.html#MYAAS-GUID-29A995D2-1D40-4AE8-A654-FB6F40B07D85\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"OCI\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": ociTenancyId,\n \"AssetRegion\": ociRegionName,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Database\",\n \"AssetService\": \"Oracle MySQL Database Service\",\n \"AssetComponent\": \"Database System\"\n },\n \"Resources\": [\n {\n \"Type\": \"OciMySqlDatabaseServiceDatabaseSystem\",\n \"Id\": mysqldbsId,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"TenancyId\": ociTenancyId,\n \"CompartmentId\": compartmentId,\n \"Region\": ociRegionName,\n \"Name\": mysqldbsName,\n \"Id\": mysqldbsId,\n \"CreatedAt\": createdAt,\n \"LifecycleState\": lbLifecycleState\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 ID.BE-5\",\n \"NIST CSF V1.1 PR.DS-4\",\n \"NIST CSF V1.1 PR.PT-5\",\n \"NIST SP 800-53 Rev. 4 AU-4\",\n \"NIST SP 800-53 Rev. 4 CP-2\",\n \"NIST SP 800-53 Rev. 4 CP-7\",\n \"NIST SP 800-53 Rev. 4 CP-8\",\n \"NIST SP 800-53 Rev. 4 CP-11\",\n \"NIST SP 800-53 Rev. 4 CP-13\",\n \"NIST SP 800-53 Rev. 4 PL-8\",\n \"NIST SP 800-53 Rev. 4 SA-14\",\n \"NIST SP 800-53 Rev. 4 SC-5\",\n \"NIST SP 800-53 Rev. 
4 SC-6\",\n \"AICPA TSC CC3.1\",\n \"AICPA TSC A1.1\",\n \"AICPA TSC A1.2\",\n \"ISO 27001:2013 A.11.1.4\",\n \"ISO 27001:2013 A.12.3.1\",\n \"ISO 27001:2013 A.17.1.1\",\n \"ISO 27001:2013 A.17.1.2\",\n \"ISO 27001:2013 A.17.2.1\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding", "def test_get_cloud_resources(self):\n pass", "def update_available_resource(self, ctxt, host):\n LOG.debug(\"update_available_resource\")\n return", "def show_dbs(*dbs):\n if dbs:\n log.debug(\"get dbs from pillar: %s\", dbs)\n result = {}\n for db in dbs:\n result[db] = __salt__[\"pillar.get\"](\"oracle:dbs:\" + db)\n return result\n else:\n pillar_dbs = __salt__[\"pillar.get\"](\"oracle:dbs\")\n log.debug(\"get all (%s) dbs from pillar\", len(pillar_dbs))\n return pillar_dbs", "def is_available():", "def _update_available_resources(self, context):\n\n all_nodes = self.driver.get_available_nodes()\n all_rps = self.scheduler_client.reportclient\\\n .get_filtered_resource_providers({})\n node_uuids = [node.uuid for node in all_nodes]\n\n # Clean orphan resource providers in placement\n for rp in all_rps:\n if rp['uuid'] not in node_uuids:\n server_by_node = objects.Server.list(\n context, filters={'node_uuid': rp['uuid']})\n if server_by_node:\n continue\n self.scheduler_client.reportclient.delete_resource_provider(\n rp['uuid'])\n\n for node in all_nodes:\n if self.driver.is_node_consumable(node):\n self.scheduler_client.reportclient \\\n .delete_allocations_for_resource_provider(node.uuid)\n resource_class = sched_utils.ensure_resource_class_name(\n node.resource_class)\n inventory = self.driver.get_node_inventory(node)\n inventory_data = {resource_class: inventory}\n self.scheduler_client.set_inventory_for_provider(\n node.uuid, node.name or node.uuid, inventory_data,\n resource_class)", "def db_lookup(client):\n dblist_dict= client.get_list_database()\n # print(\"def db_lookup 010:\", dblist_dict)\n # print(\"def db_lookup 020:\", dblist_dict[3]['name'])\n # for element in dblist_dict:\n # print(\"db_lookup 3:\", element['name'])\n return dblist_dict", "def test_get_virtualization_realm_resources(self):\n pass", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n availability_zone: Optional[pulumi.Input[str]] = None,\n connection_string: Optional[pulumi.Input[str]] = None,\n create_sample_data: Optional[pulumi.Input[bool]] = None,\n db_instance_category: Optional[pulumi.Input[str]] = None,\n db_instance_class: Optional[pulumi.Input[str]] = None,\n db_instance_mode: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n encryption_key: Optional[pulumi.Input[str]] = None,\n encryption_type: Optional[pulumi.Input[str]] = None,\n engine: Optional[pulumi.Input[str]] = None,\n engine_version: Optional[pulumi.Input[str]] = None,\n instance_charge_type: Optional[pulumi.Input[str]] = None,\n instance_group_count: Optional[pulumi.Input[int]] = None,\n instance_network_type: Optional[pulumi.Input[str]] = None,\n instance_spec: Optional[pulumi.Input[str]] = None,\n ip_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InstanceIpWhitelistArgs']]]]] = None,\n maintain_end_time: Optional[pulumi.Input[str]] = None,\n maintain_start_time: Optional[pulumi.Input[str]] = None,\n master_node_num: Optional[pulumi.Input[int]] = None,\n payment_type: Optional[pulumi.Input[str]] = None,\n period: Optional[pulumi.Input[str]] = None,\n port: Optional[pulumi.Input[str]] = 
None,\n private_ip_address: Optional[pulumi.Input[str]] = None,\n resource_group_id: Optional[pulumi.Input[str]] = None,\n security_ip_lists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n seg_node_num: Optional[pulumi.Input[int]] = None,\n seg_storage_type: Optional[pulumi.Input[str]] = None,\n ssl_enabled: Optional[pulumi.Input[int]] = None,\n status: Optional[pulumi.Input[str]] = None,\n storage_size: Optional[pulumi.Input[int]] = None,\n tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n used_time: Optional[pulumi.Input[str]] = None,\n vector_configuration_status: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n vswitch_id: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None) -> 'Instance':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InstanceState.__new__(_InstanceState)\n\n __props__.__dict__[\"availability_zone\"] = availability_zone\n __props__.__dict__[\"connection_string\"] = connection_string\n __props__.__dict__[\"create_sample_data\"] = create_sample_data\n __props__.__dict__[\"db_instance_category\"] = db_instance_category\n __props__.__dict__[\"db_instance_class\"] = db_instance_class\n __props__.__dict__[\"db_instance_mode\"] = db_instance_mode\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"encryption_key\"] = encryption_key\n __props__.__dict__[\"encryption_type\"] = encryption_type\n __props__.__dict__[\"engine\"] = engine\n __props__.__dict__[\"engine_version\"] = engine_version\n __props__.__dict__[\"instance_charge_type\"] = instance_charge_type\n __props__.__dict__[\"instance_group_count\"] = instance_group_count\n __props__.__dict__[\"instance_network_type\"] = instance_network_type\n __props__.__dict__[\"instance_spec\"] = instance_spec\n __props__.__dict__[\"ip_whitelists\"] = ip_whitelists\n __props__.__dict__[\"maintain_end_time\"] = maintain_end_time\n __props__.__dict__[\"maintain_start_time\"] = maintain_start_time\n __props__.__dict__[\"master_node_num\"] = master_node_num\n __props__.__dict__[\"payment_type\"] = payment_type\n __props__.__dict__[\"period\"] = period\n __props__.__dict__[\"port\"] = port\n __props__.__dict__[\"private_ip_address\"] = private_ip_address\n __props__.__dict__[\"resource_group_id\"] = resource_group_id\n __props__.__dict__[\"security_ip_lists\"] = security_ip_lists\n __props__.__dict__[\"seg_node_num\"] = seg_node_num\n __props__.__dict__[\"seg_storage_type\"] = seg_storage_type\n __props__.__dict__[\"ssl_enabled\"] = ssl_enabled\n __props__.__dict__[\"status\"] = status\n __props__.__dict__[\"storage_size\"] = storage_size\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"used_time\"] = used_time\n __props__.__dict__[\"vector_configuration_status\"] = vector_configuration_status\n __props__.__dict__[\"vpc_id\"] = vpc_id\n __props__.__dict__[\"vswitch_id\"] = vswitch_id\n __props__.__dict__[\"zone_id\"] = zone_id\n return Instance(resource_name, opts=opts, __props__=__props__)", "def _get_available_regions():\n session = boto3.session.Session()\n\n return session.get_available_regions(service_name='s3')", "def ex_list_availability_zones(self, only_available=True):\n params = {'Action': 'DescribeAvailabilityZones'}\n\n if only_available:\n params.update({'Filter.0.Name': 'state'})\n params.update({'Filter.0.Value.0': 'available'})\n\n params.update({'Filter.1.Name': 'region-name'})\n params.update({'Filter.1.Value.0': self.region_name})\n\n result = 
self.connection.request(self.path,\n params=params.copy()).object\n\n availability_zones = []\n for element in self._findall(result, 'availabilityZoneInfo/item'):\n name = self._findtext(element, 'zoneName')\n zone_state = self._findtext(element, 'zoneState')\n region_name = self._findtext(element, 'regionName')\n\n availability_zone = ExEC2AvailabilityZone(\n name=name,\n zone_state=zone_state,\n region_name=region_name\n )\n availability_zones.append(availability_zone)\n\n return availability_zones", "def show_resource_pool(client, private_cloud, resource_pool, location):\n return client.get(location, private_cloud, resource_pool)", "def _get_available_region_options():\n available_regions = sorted(_get_available_regions())\n options = [ConfigurationOption(region, region) for region in available_regions]\n\n return options", "def get_availability_zones(self, context, filters=None, fields=None,\n sorts=None, limit=None, marker=None,\n page_reverse=False):", "def update_available_resource(self, ctxt, host):\n return", "def update_available_resource(self, context):\n # ask hypervisor for its view of resource availability &\n # usage:\n resources = self.driver.get_available_resource()\n if not resources:\n # The virt driver does not support this function\n LOG.warn(_(\"Virt driver does not support \"\n \"'get_available_resource' Compute tracking is disabled.\"))\n self.compute_node = None\n self.claims = {}\n return\n\n # Confirm resources dictionary contains expected keys:\n self._verify_resources(resources)\n\n resources['free_ram_mb'] = resources['memory_mb'] - \\\n resources['memory_mb_used']\n resources['free_disk_gb'] = resources['local_gb'] - \\\n resources['local_gb_used']\n\n LOG.audit(_(\"free_ram_mb: %s\") % resources['free_ram_mb'])\n LOG.audit(_(\"free_disk_gb: %s\") % resources['free_disk_gb'])\n # Apply resource claims representing in-progress operations to\n # 'resources'. This may over-estimate the amount of resources in use,\n # at least until the next time 'update_available_resource' runs.\n self._apply_claims(resources)\n\n # also generate all load stats:\n values = self._create_load_stats(context)\n resources.update(values)\n\n if not self.compute_node:\n # we need a copy of the ComputeNode record:\n service = self._get_service(context)\n if not service:\n # no service record, disable resource\n return\n\n compute_node_ref = service['compute_node']\n if compute_node_ref:\n self.compute_node = compute_node_ref[0]\n\n if not self.compute_node:\n # Need to create the ComputeNode record:\n resources['service_id'] = service['id']\n self.compute_node = self._create(context, resources)\n LOG.info(_('Compute_service record created for %s ') % self.host)\n\n else:\n # just update the record:\n self.compute_node = self._update(context, resources,\n prune_stats=True)\n LOG.info(_('Compute_service record updated for %s ') % self.host)", "def database_connectivity():\n\n port = \"mongodb://localhost:27017/\"\n client = pymongo.MongoClient(host=port)\n\n db = client[\"Password_Manager\"]\n collection = db[\"Passwords\"]\n\n return collection", "def get_available_databases():\n return map(\n lambda (key, value): (key, value[\"description\"]),\n DumpConverter.DATABASES.items())", "def databases(request): # pylint: disable=unused-argument\n response = InstancesAccess.get_all()\n # By default, JsonResponse refuse to serialize a list to a Json list. 
safe=False allow it.\n return JsonResponse(response, safe=False)", "def list_allocation_candidates(req):\n context = req.environ['placement.context']\n context.can(policies.LIST)\n want_version = req.environ[microversion.MICROVERSION_ENVIRON]\n get_schema = _get_schema(want_version)\n util.validate_query_params(req, get_schema)\n\n rqparams = lib.RequestWideParams.from_request(req)\n groups = lib.RequestGroup.dict_from_request(req, rqparams)\n\n if not rqparams.group_policy:\n # group_policy is required if more than one numbered request group was\n # specified.\n if len([rg for rg in groups.values() if rg.use_same_provider]) > 1:\n raise webob.exc.HTTPBadRequest(\n 'The \"group_policy\" parameter is required when specifying '\n 'more than one \"resources{N}\" parameter.')\n\n # We can't be aware of nested architecture with old microversions\n nested_aware = want_version.matches((1, 29))\n\n try:\n cands = ac_obj.AllocationCandidates.get_by_requests(\n context, groups, rqparams, nested_aware=nested_aware)\n except exception.ResourceClassNotFound as exc:\n raise webob.exc.HTTPBadRequest(\n 'Invalid resource class in resources parameter: %(error)s' %\n {'error': exc})\n except exception.TraitNotFound as exc:\n raise webob.exc.HTTPBadRequest(str(exc))\n\n response = req.response\n trx_cands = _transform_allocation_candidates(cands, groups, want_version)\n json_data = jsonutils.dumps(trx_cands)\n response.body = encodeutils.to_utf8(json_data)\n response.content_type = 'application/json'\n if want_version.matches((1, 15)):\n response.cache_control = 'no-cache'\n response.last_modified = timeutils.utcnow(with_timezone=True)\n return response", "def ip_allocations(self) -> Optional[Sequence['outputs.SubResourceResponse']]:\n return pulumi.get(self, \"ip_allocations\")", "def get_instances() -> dict:\n url = f\"{app.config['COMPUTE_SERVERS_REF']}/detail\"\n instances_rq = request(\n method=\"GET\", url=url, headers=build_header(), params={\"vm_state\": \"active\"},\n )\n\n if not instances_rq.ok:\n HTTPError(instances_rq.status_code)\n\n answer = {\"servers\": list()}\n for instance in instances_rq.json()[\"servers\"]:\n instance_info = dict(name=instance[\"name\"])\n instance_info[\"ip_addresses\"] = list()\n for network, info in instance[\"addresses\"].items():\n instance_info[\"ip_addresses\"].extend(entry[\"addr\"] for entry in info)\n answer[\"servers\"].append(instance_info)\n\n return answer", "def get_all_db_region(self, context):\n zone_objs = self.dns_manager.get_all_db_region(context)\n return zone_objs", "def check_available(rs):\n info = rs.get_cluster_info()\n if info['ClusterStatus'] == 'available':\n return True\n elif info['ClusterStatus'] == 'deleting':\n raise AttributeError(f'Cluster is currently deleting. Please wait and try again.\\n{info}')\n elif info['ClusterStatus'] == 'creating': \n return False\n \n raise NameError(f'Could not get cluster status information. Current information about the cluster: \\n{info}')", "def update_available_resource(self, context):\n new_resource_tracker_dict = {}\n\n compute_nodes_in_db = self._get_compute_nodes_in_db(context,\n use_slave=True)\n nodenames = set(self.driver.get_available_nodes())\n for nodename in nodenames:\n rt = self._get_resource_tracker(nodename)\n try:\n rt.update_available_resource(context)\n except exception.ComputeHostNotFound:\n # NOTE(comstud): We can get to this case if a node was\n # marked 'deleted' in the DB and then re-added with a\n # different auto-increment id. 
The cached resource\n # tracker tried to update a deleted record and failed.\n # Don't add this resource tracker to the new dict, so\n # that this will resolve itself on the next run.\n LOG.info(_LI(\"Compute node '%s' not found in \"\n \"update_available_resource.\"), nodename)\n continue\n except Exception:\n LOG.exception(_LE(\"Error updating resources for node \"\n \"%(node)s.\"), {'node': nodename})\n new_resource_tracker_dict[nodename] = rt\n\n # NOTE(comstud): Replace the RT cache before looping through\n # compute nodes to delete below, as we can end up doing greenthread\n # switches there. Best to have everyone using the newest cache\n # ASAP.\n self._resource_tracker_dict = new_resource_tracker_dict\n\n # Delete orphan compute node not reported by driver but still in db\n for cn in compute_nodes_in_db:\n if cn.hypervisor_hostname not in nodenames:\n LOG.info(_LI(\"Deleting orphan compute node %s\"), cn.id)\n cn.destroy()", "def avail(self, time, resource_group):\n a = set()\n for r in self.resource_group.resources:\n pass", "def list_rds(region, filter_by_kwargs):\n conn = boto.rds.connect_to_region(region)\n instances = conn.get_all_dbinstances()\n return lookup(instances, filter_by=filter_by_kwargs)", "def GetAvailabilityOfConnection(ConnectionInfo, StartDate, EndDate):\r\n\tVerkehrstageHex = ConnectionInfo[ConnInfoInd['trafficdays_hexcode']]\r\n\treturn GetAvailabilityBetweenDates(StartDate, EndDate, VerkehrstageHex)", "def list_resource_pool(client, private_cloud, location):\n return client.list(location, private_cloud)", "def api_get_regions():\n db_session = DBSession()\n\n rows = []\n criteria = '%'\n if request.args and request.args.get('q'):\n criteria += request.args.get('q') + '%'\n else:\n criteria += '%'\n\n regions = db_session.query(Region).filter(Region.name.like(criteria)).order_by(Region.name.asc()).all()\n if len(regions) > 0:\n if request.args.get('show_all'):\n rows.append({'id': 0, 'text': 'ALL'})\n for region in regions:\n rows.append({'id': region.id, 'text': region.name})\n\n return jsonify(**{'data': rows})", "def supports_catalog_lookup(self):\n # Implemented from kitosid template for -\n # osid.resource.ResourceProfile.supports_resource_lookup\n return self._provider_manager.supports_catalog_lookup()", "def test_list_cluster_resource_quota(self):\n pass", "def get_allocations(self):\n cursor = self.cur()\n cursor.execute('SELECT {col1}, {col2} FROM {tn}'.format(\n tn=\"allocation\", col1=\"room_name\", col2=\"person_id\"))\n allocations = cursor.fetchall()\n return allocations", "def set_resources():\n global available_resources\n global EdgenodeResources\n recv_json = request.get_json()\n for resourcename, value in recv_json.items():\n available_resources[resourcename] = value\n # TODO make this better\n EdgenodeResources = [TaskResources(ram=int(available_resources['RAM']), cpu=int(\n available_resources['CPU']), hdd=int(available_resources['HDD'])), available_resources['DATA']]\n\n print 'Available resources set to', EdgenodeResources\n return 'Available resources set to ' + str(available_resources)", "def avail_images(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"The avail_images function must be called with \"\n \"-f or --function, or with the --list-images option\"\n )\n\n ret = {}\n conn = get_conn()\n\n for item in conn.list_images()[\"items\"]:\n image = {\"id\": item[\"id\"]}\n image.update(item[\"properties\"])\n ret[image[\"name\"]] = image\n\n return ret", "def __init__(__self__,\n resource_name: str,\n args: 
DatabaseReplicaArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def test_show_available(self):\n database.import_data('csvs', 'product_data.csv', 'customer_data.csv', 'rentals_data.csv')\n actual_available = database.show_available_products()\n expected_available = {'prd001': {'description': 'TV', 'product_type': 'livingroom',\n 'quantity_available': '3'},\n 'prd002': {'description': 'Couch', 'product_type': 'livingroom',\n 'quantity_available': '1'}}\n self.assertEqual(actual_available, expected_available)\n database.delete_database()\n\n database.import_data('csvs', 'produc_data.csv', 'customer_data.csv', 'rentals_data.csv')\n database.delete_database()", "def get_available_agendas(self):\n pass", "def getcars():\n cars = Car.query.filter(Car.isavailable == True)\n result = carsSchema.dump(cars)\n print(result)\n return jsonify(result)", "def get_available_resources(threshold, usage, total):\n return dict((host, int(threshold * total[host] - resource))\n for host, resource in usage.items())", "def acquire(self):\n self.logger.debug(\"in NerscAllocationInfo acquire\")\n return {\"Nersc_Allocation_Info\": pd.DataFrame(self.send_query())}", "def test_index_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def api_db():\n return pymongo.MongoClient(SCITRAN_PERSISTENT_DB_URI).get_database()", "def _available_space( self, pool_name ):\n\t\ttry:\n\t\t\treturn self.storage_pools[pool_name].available\n\t\texcept KeyError:\n\t\t\treturn -1", "def get_instance_ram_allocated(self, resource, period,\n aggregate, granularity=None):\n pass", "def test_read_cluster_resource_quota_status(self):\n pass", "def is_spatialized(resource):\n spatialized = False\n resource_id = resource['id']\n package_id=ckan_model.Resource.get(resource_id).resource_group.package_id\n package = ckan_model.Package.get(package_id)\n for resource in package.resources:\n if 'protocol' in resource.extras and 'parent_resource' in resource.extras:\n extras = resource.extras\n try:\n toolkit.get_action('resource_show')(None, { 'id':resource.id,'for-view':True })\n except (NotFound):\n continue\n\n if extras['parent_resource'] == resource_id\\\n and ( extras['protocol'].lower() == 'ogc:wms' or extras['ogc_type'].lower() == 'ogc:wfs'):\n print resource.state\n if resource.state !='active':\n return False\n spatialized = True\n break\n return spatialized", "def count_orphan_resource_providers(db):\n sql = '''\\\n SELECT COUNT(*)\n FROM nova_api.resource_providers rp JOIN nova.compute_nodes cn\n ON cn.hypervisor_hostname = rp.name\n WHERE cn.deleted = 0\n AND rp.uuid != cn.uuid\n '''\n return db.query(sql)", "def get_available_images():\n return AVAILABLE_IMAGES", "def describe_rds_db_instances(StackId=None, RdsDbInstanceArns=None):\n pass", "def list_availability_definition(self):\n return self._get(path='availability')", "def spark_list(provider):\n api.available(provider)", "def print_available( self ):\n\n\t\tmax_length = 0\n\n\t\tfor key in self._available:\n\t\t\tmax_length = max( max_length, len( key ) )\n\n\t\tformat_str = 'API found: %%-%ds (%%s)' % max_length\n\n\t\tfor key in self._available:\n\t\t\tentry = self._available.get( key )\n\t\t\tprint( format_str % ( key, entry.get( 'path' ) ) )", "def get_oracle(verbosity, resultset, providerversion):\n try:\n response = requests.get(ORACLEAPIURL)\n if verbosity:\n print(response.status_code)\n if response.status_code == 200:\n cidrdata = json.loads(response.content)\n providerversion[\"ORACLE\"] = cidrdata[\"last_updated_timestamp\"]\n 
for i in range(0, len(cidrdata[\"regions\"])):\n for j in range(0, len(cidrdata[\"regions\"][i][\"cidrs\"])):\n if cidrdata[\"regions\"][i][\"cidrs\"][j][\"cidr\"] not in resultset:\n resultset[cidrdata[\"regions\"][i][\"cidrs\"][j][\"cidr\"]] = \"Oracle\"\n\n except Exception as get_exception:\n print(\"Exception\")\n print(get_exception)\n\n return resultset, providerversion", "def test_read_cluster_resource_quota(self):\n pass", "def test_load_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def validate_availability_zones(self, context, resource_type,\n availability_zones):", "def available(self):\n return self[\"available\"]", "def describe_availability_zones_with_options(\n self,\n request: dds_20151201_models.DescribeAvailabilityZonesRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeAvailabilityZonesResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.accept_language):\n query['AcceptLanguage'] = request.accept_language\n if not UtilClient.is_unset(request.db_type):\n query['DbType'] = request.db_type\n if not UtilClient.is_unset(request.exclude_secondary_zone_id):\n query['ExcludeSecondaryZoneId'] = request.exclude_secondary_zone_id\n if not UtilClient.is_unset(request.exclude_zone_id):\n query['ExcludeZoneId'] = request.exclude_zone_id\n if not UtilClient.is_unset(request.instance_charge_type):\n query['InstanceChargeType'] = request.instance_charge_type\n if not UtilClient.is_unset(request.mongo_type):\n query['MongoType'] = request.mongo_type\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.region_id):\n query['RegionId'] = request.region_id\n if not UtilClient.is_unset(request.resource_group_id):\n query['ResourceGroupId'] = request.resource_group_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.storage_support):\n query['StorageSupport'] = request.storage_support\n if not UtilClient.is_unset(request.storage_type):\n query['StorageType'] = request.storage_type\n if not UtilClient.is_unset(request.zone_id):\n query['ZoneId'] = request.zone_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeAvailabilityZones',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeAvailabilityZonesResponse(),\n self.call_api(params, req, runtime)\n )", "def ComputeEAvailable(self):\r\n pass", "def get_available_regions(service_name):\n session = boto3.session.Session()\n return session.get_available_regions(service_name)", "def get_available_regions(service_name):\n session = boto3.session.Session()\n return session.get_available_regions(service_name)", "def is_available(self):\n raise NotImplementedError", "def __init__(__self__,\n resource_name: str,\n args: DatabaseArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def 
Available(self) -> int:", "def Available(self) -> int:", "def Available(self) -> int:", "def migrate_available_zone(\n self,\n request: dds_20151201_models.MigrateAvailableZoneRequest,\n ) -> dds_20151201_models.MigrateAvailableZoneResponse:\n runtime = util_models.RuntimeOptions()\n return self.migrate_available_zone_with_options(request, runtime)", "def __init__(__self__,\n resource_name: str,\n args: RegionPerInstanceConfigArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def GetApiCollection(resource_type):\n return 'compute.' + resource_type", "def init_mongo_db():\n try:\n app.mongo.cx.server_info()\n app.mongo.db = app.mongo.cx[\"kamistudio\"]\n if \"kami_corpora\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_corpora\")\n app.mongo.db.kami_corpora.create_index(\"id\", unique=True)\n\n if \"kami_models\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_models\")\n app.mongo.db.kami_models.create_index(\"id\", unique=True)\n\n if \"kami_definitions\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_definitions\")\n app.mongo.db.kami_definitions.create_index(\"id\", unique=True)\n\n if \"kami_new_definitions\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_new_definitions\")\n\n except ServerSelectionTimeoutError as e:\n app.mongo.db = None", "def getStudyRegions():\n comp_name = os.environ['COMPUTERNAME']\n conn = py.connect('Driver=ODBC Driver 11 for SQL Server;SERVER=' +\n comp_name + '\\HAZUSPLUSSRVR; UID=SA;PWD=Gohazusplus_02')\n exclusionRows = ['master', 'tempdb', 'model', 'msdb', 'syHazus', 'CDMS', 'flTmpDB']\n cursor = conn.cursor()\n cursor.execute('SELECT [StateID] FROM [syHazus].[dbo].[syState]') \n for state in cursor:\n exclusionRows.append(state[0])\n cursor = conn.cursor()\n cursor.execute('SELECT * FROM sys.databases')\n studyRegions = []\n for row in cursor:\n if row[0] not in exclusionRows:\n studyRegions.append(row[0])\n studyRegions.sort(key=lambda x: x.lower())\n return studyRegions", "def open_db_connection():\n client = MongoClient() #'104.131.185.191', 27017\n db = client[\"225VOH\"]\n return client, db", "def show(collection, filter = {}):\n # creates a connection with database\n result = []\n myclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\n db = myclient[\"techstart\"]\n col = db[collection]\n for x in col.find(filter):\n result.append(x)\n return result", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n backend_type: Optional[pulumi.Input['InstanceBackendType']] = None,\n connection_name: Optional[pulumi.Input[str]] = None,\n current_disk_size: Optional[pulumi.Input[str]] = None,\n database_version: Optional[pulumi.Input['InstanceDatabaseVersion']] = None,\n disk_encryption_configuration: Optional[pulumi.Input[pulumi.InputType['DiskEncryptionConfigurationArgs']]] = None,\n disk_encryption_status: Optional[pulumi.Input[pulumi.InputType['DiskEncryptionStatusArgs']]] = None,\n etag: Optional[pulumi.Input[str]] = None,\n failover_replica: Optional[pulumi.Input[pulumi.InputType['InstanceFailoverReplicaArgs']]] = None,\n gce_zone: Optional[pulumi.Input[str]] = None,\n instance_type: Optional[pulumi.Input['InstanceInstanceType']] = None,\n ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpMappingArgs']]]]] = None,\n ipv6_address: Optional[pulumi.Input[str]] = None,\n kind: Optional[pulumi.Input[str]] = None,\n maintenance_version: 
Optional[pulumi.Input[str]] = None,\n master_instance_name: Optional[pulumi.Input[str]] = None,\n max_disk_size: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n on_premises_configuration: Optional[pulumi.Input[pulumi.InputType['OnPremisesConfigurationArgs']]] = None,\n out_of_disk_report: Optional[pulumi.Input[pulumi.InputType['SqlOutOfDiskReportArgs']]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n replica_configuration: Optional[pulumi.Input[pulumi.InputType['ReplicaConfigurationArgs']]] = None,\n replica_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n root_password: Optional[pulumi.Input[str]] = None,\n satisfies_pzs: Optional[pulumi.Input[bool]] = None,\n scheduled_maintenance: Optional[pulumi.Input[pulumi.InputType['SqlScheduledMaintenanceArgs']]] = None,\n secondary_gce_zone: Optional[pulumi.Input[str]] = None,\n self_link: Optional[pulumi.Input[str]] = None,\n server_ca_cert: Optional[pulumi.Input[pulumi.InputType['SslCertArgs']]] = None,\n service_account_email_address: Optional[pulumi.Input[str]] = None,\n settings: Optional[pulumi.Input[pulumi.InputType['SettingsArgs']]] = None,\n state: Optional[pulumi.Input['InstanceState']] = None,\n suspension_reason: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceSuspensionReasonItem']]]] = None,\n __props__=None):\n ...", "def db_connect():\n client = pymongo.MongoClient(get_project_settings().get(\"MONGO_URI\"))\n db = client.vehicles\n\n return db" ]
[ "0.60551995", "0.5760646", "0.57550323", "0.5718856", "0.5698559", "0.5651317", "0.5415391", "0.537108", "0.5326645", "0.5323153", "0.5282289", "0.52814525", "0.52669567", "0.5246053", "0.5229421", "0.51913524", "0.5127985", "0.51034117", "0.50851274", "0.5082279", "0.5065411", "0.49871925", "0.4936069", "0.49311548", "0.4918013", "0.49137565", "0.49058193", "0.487839", "0.48722693", "0.48635742", "0.48497558", "0.48483235", "0.48396334", "0.48184606", "0.48144183", "0.47949022", "0.47933233", "0.47869393", "0.47849923", "0.4781802", "0.4773984", "0.47567204", "0.47550437", "0.4749413", "0.47472683", "0.4743972", "0.47082165", "0.46785608", "0.46750805", "0.4671412", "0.46620652", "0.46559212", "0.4649686", "0.464362", "0.46239653", "0.46199468", "0.46191502", "0.46159208", "0.46122012", "0.4605546", "0.4599438", "0.45987833", "0.45953676", "0.45922846", "0.45901024", "0.455933", "0.45541394", "0.45374724", "0.45351022", "0.45343187", "0.45331162", "0.45327255", "0.4531376", "0.4530157", "0.4519199", "0.45111224", "0.4505876", "0.4498664", "0.44779083", "0.44695356", "0.44667223", "0.44643384", "0.44548345", "0.4443826", "0.44411546", "0.44257474", "0.44257474", "0.4409727", "0.4404174", "0.4395109", "0.4395109", "0.4395109", "0.43948823", "0.43946773", "0.4388398", "0.43881032", "0.4385349", "0.43684635", "0.4365823", "0.43608606", "0.4356734" ]
0.0
-1
This operation is applicable to subscription instances.
def describe_renewal_price_with_options( self, request: dds_20151201_models.DescribeRenewalPriceRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.DescribeRenewalPriceResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.business_info): query['BusinessInfo'] = request.business_info if not UtilClient.is_unset(request.coupon_no): query['CouponNo'] = request.coupon_no if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='DescribeRenewalPrice', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.DescribeRenewalPriceResponse(), self.call_api(params, req, runtime) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def subscription(self):\r\n return SubscriptionResource(self)", "def test_update_subscription(self):\n pass", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def subscription(self, uuid):\r\n return subs.Subscription(self, uuid)", "def test_get_subscription(self):\n pass", "def test_get_subscriptions(self):\n pass", "def test_create_subscription(self):\n pass", "def get_subscription(self):\n return self.request({\n 'path': '/' + UUID + '/subscription'})", "def subscription(self):\n return self._subscription", "def subscription(self):\n return self._subscription", "def test_process_subscriptions(self):\n pass", "def touch_subscription(self):\n try:\n # check if subscription exists\n sub = self.client.get_subscription(subscription=self.subscription_path)\n\n except NotFound:\n self._create_subscription()\n\n else:\n self.topic_path = sub.topic\n print(f\"Subscription exists: {self.subscription_path}\")\n print(f\"Connected to topic: {self.topic_path}\")", "def post_get_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def handle_create(self):\n subscription = self.client().subscription(\n self.properties[self.QUEUE_NAME],\n subscriber=self.properties[self.SUBSCRIBER],\n ttl=self.properties[self.TTL],\n options=self.properties[self.OPTIONS]\n )\n self.resource_id_set(subscription.id)", "def subscribe(self, subject):\n pass", "def _subscribe(self):\n if self.subscribed:\n return False\n return {}", "def subscriptions(self):\r\n return subs.Subscriptions(self)", "def subscription(self, subscription):\n\n self._subscription = subscription", "def post_update_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def test_issue_subscriptions(self):\n pass", "def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def _subscribe(self):\n self.subscribed = True\n self.subscribe_date = now()\n self.unsubscribed = False", "def subscription(bot, update):\n chat_id = update.message.chat_id\n bot.sendMessage(chat_id=chat_id, text=SUBSCRIPTION_MSG, parse_mode='markdown', \n disable_web_page_preview=True)\n \n mp.track(get_user_info(chat_id)['PID'], 'Checked Subscription')", "def test_load_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def subscription(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"subscription\")", "def subscription(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"subscription\")", "def test_aws_service_api_validate_subscription_post(self):\n pass", "def subscriptions(self):\r\n return v3.Subscriptions(self)", "def subscriptions(self) -> pulumi.Output[Optional[Sequence['outputs.ResourceIdResponse']]]:\n return pulumi.get(self, \"subscriptions\")", "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def on_subscribe( client, userdata, mid, granted_qos ):\n logging.info( \"Topic successfully subcribed with QoS: %s\" %granted_qos )", "def post_create_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def subscribe(self, **subscription_request):\n return self.subscribe_impl(mode='subscribe', **subscription_request)", "def 
subscriptions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourceIdArgs']]]]:\n return pulumi.get(self, \"subscriptions\")", "def no_save_individual_subscription(sender, instance, **kwargs):\n try:\n Subscription.objects.get(pk=instance.pk) # looking if the subscription exist, if the case, we assume here is updating active status or email status\n except:\n if instance.user is not None:\n subs_ids = instance.user.groups.values_list('subscription')\n for sub in subs_ids:\n if None not in sub:\n alarm = Subscription.objects.get(id=sub[0]).alarm\n if alarm == instance.alarm:\n raise ValidationError('The user is subscribed to the same alarm for a group')\n\n subs = Subscription.objects.filter(user=instance.user)\n for sub in subs:\n if sub.alarm == instance.alarm:\n raise ValidationError('The user is subscribed to this alarm')", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_issue_add_subscription(self):\n pass", "def create_subscription(self,\n body):\n\n return super().new_api_call_builder.request(\n RequestBuilder().server('default')\n .path('/v2/subscriptions')\n .http_method(HttpMethodEnum.POST)\n .header_param(Parameter()\n .key('Content-Type')\n .value('application/json'))\n .body_param(Parameter()\n .value(body))\n .header_param(Parameter()\n .key('accept')\n .value('application/json'))\n .body_serializer(APIHelper.json_serialize)\n .auth(Single('global'))\n ).response(\n ResponseHandler()\n .deserializer(APIHelper.json_deserialize)\n .is_api_response(True)\n .convertor(ApiResponse.create)\n ).execute()", "def subscribe(self, inst):\r\n if inst not in self._subscribers:\r\n self._subscribers.append(inst)\r\n vprint(\"{} is subscribed to {}\".format(inst.name, self.name))", "def post(self):\n data = request.json\n return new_subscription(data=data)", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_update_subscription_template(self):\n pass", "def test_delete_subscription(self):\n pass", "def get_subscription(self):\n if not hasattr(self, '_subscription'):\n self._subscription = self.admin.subscriptions.select_related('plan').get_overlapping(\n self.admin_id, DateRange(self.period, self.period_end, bounds='[]'))\n return self._subscription", "def get_subscriptions(self):\n return {}", "def subscribe(self):\n res = self._subscribe()\n if res is not None:\n self._subscribed = True\n return res", "def subscribe(receiver):", "def subscribe(receiver):", "def subscribe(receiver):", "def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def subscribe(self, subscription):\n try:\n if isinstance(subscription, Subscription):\n sub = Subscribe(subscription, self.__pool, self.myAddress)\n self.send(self.__pool, sub)\n except Exception:\n handle_actor_system_fail()", "def subscriptions(self):\r\n return subs.AccountSubscriptions(self)", "def set_subscription(self, value):\n self.pub_socket.setsockopt(zmq.SUBSCRIBE, value)", "def add_subscription(self):\n schema = schemas.load(schemas.Subscription, self.request)\n subscription = self.customer.add_subscription(**schema)\n self.request.db.flush()\n self.request.response.status_int = 201\n return {'abonnement': subscription}", "def subscribe(self):\n if not self._subscribed and self._connected:\n if ATTR_STREAM_ID not in self.data:\n msg = self._create_message(strings.SUB_MSG)\n self.write(msg)\n else:\n msg = 
self._create_message(strings.RESUB_MSG)\n self.write(msg)\n self._subscribed = True", "def to_subscription_instance_10(self):\n delivery_params = None # TODO: Implement this\n poll_instances = None # TODO: Implement this\n\n si = tm10.SubscriptionInstance(subscription_id=str(self.subscription_id),\n delivery_parameters=delivery_params,\n poll_instances=poll_instances)\n return si", "def get_subscriptions(self) -> Iterator[\"Subscription\"]:\n yield from self._subscriptions[self.id]", "def subscription(self) -> SubscriptionServiceProxy:\n if self._subscription_services is None:\n self._subscription_services = SubscriptionServiceProxy(self)\n return self._subscription_services", "def post_list_subscriptions(\n self, response: pubsub.ListSubscriptionsResponse\n ) -> pubsub.ListSubscriptionsResponse:\n return response", "def subscribe(self, request: Request) -> Response:\n ids = request.data.get(\"ids\", None)\n session_id = request.data.get(\"session_id\")\n content_type = ContentType.objects.get_for_model(self.get_queryset().model)\n user = request.user if request.user.is_authenticated else get_anonymous_user()\n subscription = Subscription.objects.create(user=user, session_id=session_id)\n\n if ids is None:\n # Subscribe to the whole table.\n subscription.subscribe(\n content_type, [Observer.ALL_IDS], (ChangeType.CREATE, ChangeType.DELETE)\n )\n else:\n # Verify all ids exists and user has permissions to view them.\n for id in ids:\n if not self.user_has_permission(id, request.user):\n raise NotFound(f\"Item {id} does not exist\")\n\n change_types = (ChangeType.UPDATE, ChangeType.DELETE, ChangeType.CREATE)\n subscription.subscribe(content_type, ids, change_types)\n\n resp = {\"subscription_id\": subscription.subscription_id}\n return Response(resp)", "def _pause_subscription(self):\n return {}", "def add_subscription(self, query):\n key = query.key()\n if key not in self.subscriptions:\n self.subscriptions += [key]\n self.put()", "def test_subscribe(self):\n self.service.clientConnected()\n self.service.subscribe(u'url', None)\n pubsubClient = self.service.pubsubClient\n self.assertIn(u'url', pubsubClient.subscriptions)", "def test_list_template_subscriptions(self):\n pass", "def test_update_template_subscription(self):\n pass", "def get_subscription_id(self):\n return self.instance_metadata.subscription_id", "def _async_track_subscription(self, subscription: Subscription) -> None:\n if _is_simple_match(subscription.topic):\n self._simple_subscriptions.setdefault(subscription.topic, []).append(\n subscription\n )\n else:\n self._wildcard_subscriptions.append(subscription)", "def getAllSubscriptions(self):\n return self.request(\n \"getAllSubscriptions\",\n )", "def subscriptions(self):\n if not hasattr(self, '_subscriptions'):\n subscriptions_resource = self.resource.subscriptions\n self._subscriptions = Subscriptions(\n subscriptions_resource, self.client)\n return self._subscriptions", "def test_get_template_subscription(self):\n pass", "def list_subscriptions(self):\n return {'abonnementen': self.customer.abonnementen}", "def GetSubscription(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_subscriptions(self):\n return self.subscriptions.all()", "def _unsubscribe(self):\n if not self.subscribed:\n return False\n return {}", "def toggle_subscription(self):\n user = self.context['request'].user\n # pylint: disable=no-member\n profile = 
UserProfile.objects.get(\n user=user)\n club = self.context['club']\n\n if club in profile.subscriptions.all():\n club.subscribed_users.remove(profile)\n else:\n club.subscribed_users.add(profile)", "def __call__(\n self,\n request: pubsub.Subscription,\n *,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Optional[float] = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> pubsub.Subscription:\n\n http_options: List[Dict[str, str]] = [\n {\n \"method\": \"put\",\n \"uri\": \"/v1/{name=projects/*/subscriptions/*}\",\n \"body\": \"*\",\n },\n ]\n request, metadata = self._interceptor.pre_create_subscription(\n request, metadata\n )\n pb_request = pubsub.Subscription.pb(request)\n transcoded_request = path_template.transcode(http_options, pb_request)\n\n # Jsonify the request body\n\n body = json_format.MessageToJson(\n transcoded_request[\"body\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n uri = transcoded_request[\"uri\"]\n method = transcoded_request[\"method\"]\n\n # Jsonify the query params\n query_params = json.loads(\n json_format.MessageToJson(\n transcoded_request[\"query_params\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n )\n query_params.update(self._get_unset_required_fields(query_params))\n\n query_params[\"$alt\"] = \"json;enum-encoding=int\"\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = getattr(self._session, method)(\n \"{host}{uri}\".format(host=self._host, uri=uri),\n timeout=timeout,\n headers=headers,\n params=rest_helpers.flatten_query_params(query_params, strict=True),\n data=body,\n )\n\n # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)\n\n # Return the response\n resp = pubsub.Subscription()\n pb_resp = pubsub.Subscription.pb(resp)\n\n json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)\n resp = self._interceptor.post_create_subscription(resp)\n return resp", "def _create_sub(name, rostype, topic_callback, *args, **kwargs):\n # counting subscriber instance per topic name\n if name in TopicBack.sub_instance_count.keys():\n TopicBack.sub_instance_count[name] += 1\n else:\n TopicBack.sub_instance_count[name] = 1\n\n return rospy.Subscriber(name, rostype, topic_callback, *args, **kwargs)", "def delete_individual_subscriptions_for_grupal_subscription(sender, instance, **kwargs):\n if instance.group is not None: # Only for group subscription creation\n users = User.objects.filter(groups__name=instance.group)\n subs = Subscription.objects.filter(user__in=users)\n for sub in subs:\n if sub.alarm == instance.alarm:\n print('%s deleted' % sub)\n sub.delete()", "def test_create_subscription_template(self):\n pass", "def test_issue_delete_subscription(self):\n pass", "def to_subscription_instance_11(self):\n subscription_params = tm11.SubscriptionParameters(response_type=self.response_type,\n content_bindings=[str(x) for x in\n self.supported_content.all()])\n\n if self.query:\n subscription_params.query = self.query.to_query_11()\n\n push_params = None # TODO: Implement this\n poll_instances = None # TODO: Implement this\n si = tm11.SubscriptionInstance(subscription_id=str(self.subscription_id),\n status=self.status,\n subscription_parameters=subscription_params,\n push_parameters=push_params,\n poll_instances=poll_instances)\n return si", "def subscribe(self, 
item_name):\n if item_name == ITEM_NAME:\n self.subscribed = item_name\n else:\n # Only one item for a unique chat room is managed.\n raise SubscribeError(\"No such item\")", "def __init__(self, data):\n super(OptionsChainSubscriptionCreate, self).__init__()\n self.data = data", "def test_get_subscription_template(self):\n pass", "def enable_subscription():\n client = KConsumer(config=subscriber_config)\n counter = 0\n while 1:\n data = client.consume()\n if data:\n print(\"Received Data\", counter)\n class_label = inference_on_data(data.value)\n publish_response(class_label)", "def subscribeConsumer(consumer):", "def __init__(self, ContextId, ReferenceId, data):\n super(OptionsChainSubscriptionModify, self).__init__(\n ReferenceId=ReferenceId,\n ContextId=ContextId)\n self.data = data", "def getSubscriptions(self):\n\n address = self.getAddress()\n if address is None:\n return []\n else:\n return [\n \"shellies/announce\",\n \"{}/online\".format(address),\n \"{}/emeter/{}/energy\".format(address, self.getChannel()),\n \"{}/emeter/{}/returned_energy\".format(address, self.getChannel()),\n \"{}/emeter/{}/power\".format(address, self.getChannel()),\n \"{}/emeter/{}/reactive_power\".format(address, self.getChannel()),\n \"{}/emeter/{}/voltage\".format(address, self.getChannel()),\n \"{}/emeter/{}/total\".format(address, self.getChannel()),\n \"{}/emeter/{}/total_returned\".format(address, self.getChannel())\n ]", "def subscribe_command(shared, chat, message, args):\n subs = shared[\"subs\"]\n subs.append(chat.id)\n shared[\"subs\"] = subs", "def __init__(self, ContextId, ReferenceId):\n super(OptionsChainSubscriptionRemove, self).__init__(\n ReferenceId=ReferenceId,\n ContextId=ContextId)", "def _subscribe(self, sub_type: str, sub_version: str, condition: dict, callback) -> str:\n self.__logger.debug(f'subscribe to {sub_type} version {sub_version} with condition {condition}')\n data = {\n 'type': sub_type,\n 'version': sub_version,\n 'condition': condition,\n 'transport': {\n 'method': 'webhook',\n 'callback': f'{self.callback_url}/callback',\n 'secret': self.secret\n }\n }\n r_data = self.__api_post_request(TWITCH_API_BASE_URL + 'eventsub/subscriptions', data=data)\n result = r_data.json()\n error = result.get('error')\n if r_data.status_code == 500:\n raise TwitchBackendException(error)\n if error is not None:\n if error.lower() == 'conflict':\n raise EventSubSubscriptionConflict(result.get('message', ''))\n raise EventSubSubscriptionError(result.get('message'))\n sub_id = result['data'][0]['id']\n self.__add_callback(sub_id, callback)\n if self.wait_for_subscription_confirm:\n timeout = datetime.datetime.utcnow() + datetime.timedelta(\n seconds=self.wait_for_subscription_confirm_timeout)\n while timeout >= datetime.datetime.utcnow():\n if self.__callbacks[sub_id]['active']:\n return sub_id\n asyncio.get_event_loop().run_until_complete(asyncio.sleep(0.01))\n self.__callbacks.pop(sub_id, None)\n raise EventSubSubscriptionTimeout()\n return sub_id", "def subscribePost() -> object:\n log = logging.getLogger(__name__)\n db = Db()\n\n body = request.get_json()\n\n if body is None:\n return jsonify({\"error\": \"json body is required\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n if not('datasetId') in body:\n return jsonify({\"error\": \"datasetId is a required attribute\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n if not('notificationUrl') in body:\n return jsonify({\"error\": \"notificationUrl is a required attribute\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n\n subscription = db.Subscriptions(\n 
datasetId=body['datasetId'],\n notificationUrl=body['notificationUrl']\n )\n\n subscription.save()\n\n subscription = json.loads(subscription.to_json())\n subscription['id'] = subscription['_id'][\"$oid\"]\n subscription.pop(\"_id\")\n log.debug(\"subscription created\")\n\n return jsonify(subscription), HTTPStatus.CREATED", "def pause_subscription(self):\n res = self._pause_subscription()\n self._subscribed = not self.subscribed\n return res", "def subscription(self, subscription):\n if subscription is None:\n raise ValueError(\"Invalid value for `subscription`, must not be `None`\")\n\n self._subscription = subscription", "def test_list_pending_template_subscriptions(self):\n pass", "def subscribe(receiver, updateInterval=10):", "def getsubscriptions(self):\n subs = {}\n for sub in self._subscriptions.values():\n subs[sub.ID] = sub.asTuple()\n return subs", "def test_existing_subscriptions_autosubscription(self) -> None:\n stream_name = \"new_public_stream\"\n cordelia = self.example_user(\"cordelia\")\n self.common_subscribe_to_streams(cordelia, [stream_name], invite_only=False)\n result = self.client_post(\n \"/json/subscriptions/exists\", {\"stream\": stream_name, \"autosubscribe\": \"false\"}\n )\n response_dict = self.assert_json_success(result)\n self.assertIn(\"subscribed\", response_dict)\n self.assertFalse(response_dict[\"subscribed\"])\n\n result = self.client_post(\n \"/json/subscriptions/exists\", {\"stream\": stream_name, \"autosubscribe\": \"true\"}\n )\n response_dict = self.assert_json_success(result)\n self.assertIn(\"subscribed\", response_dict)\n self.assertTrue(response_dict)", "def get_subscriptions_to_self(self):\n return self._roster.get_my_subscribers()\n return self._roster.get_my_subscribers()" ]
[ "0.6768464", "0.6724986", "0.67119396", "0.6617272", "0.659594", "0.6585656", "0.65232456", "0.6443254", "0.6432979", "0.6342068", "0.6316182", "0.6316182", "0.63120866", "0.62734044", "0.6266068", "0.6236521", "0.6234891", "0.6200018", "0.6196757", "0.61762744", "0.61590755", "0.6145037", "0.61427397", "0.6117785", "0.60913867", "0.6046587", "0.60381734", "0.60381734", "0.60095227", "0.60068405", "0.6002366", "0.5974324", "0.5970992", "0.59404236", "0.5922197", "0.58950824", "0.58852506", "0.58791226", "0.58771014", "0.5865434", "0.5850745", "0.5843618", "0.5842252", "0.5842233", "0.5829431", "0.5820333", "0.58125776", "0.57993525", "0.57912076", "0.57912076", "0.57912076", "0.57897145", "0.57676", "0.57596546", "0.5758095", "0.57508147", "0.5745929", "0.57144934", "0.5696573", "0.5695495", "0.56950444", "0.5684795", "0.56609845", "0.5660792", "0.56408226", "0.56199765", "0.5612484", "0.5610517", "0.5599685", "0.5596116", "0.5595185", "0.55934614", "0.55910414", "0.55863214", "0.55849946", "0.55775213", "0.55711204", "0.5563398", "0.5536979", "0.55124307", "0.5510051", "0.5499565", "0.546586", "0.54585266", "0.54536515", "0.5451924", "0.5440518", "0.5438442", "0.54334587", "0.54208875", "0.5406239", "0.5402143", "0.53994006", "0.5399044", "0.53945386", "0.5393975", "0.53886455", "0.53747004", "0.53721243", "0.53708404", "0.5369461" ]
0.0
-1
This operation is applicable to subscription instances.
async def describe_renewal_price_with_options_async( self, request: dds_20151201_models.DescribeRenewalPriceRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.DescribeRenewalPriceResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.business_info): query['BusinessInfo'] = request.business_info if not UtilClient.is_unset(request.coupon_no): query['CouponNo'] = request.coupon_no if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='DescribeRenewalPrice', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.DescribeRenewalPriceResponse(), await self.call_api_async(params, req, runtime) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def subscription(self):\r\n return SubscriptionResource(self)", "def test_update_subscription(self):\n pass", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def subscription(self, uuid):\r\n return subs.Subscription(self, uuid)", "def test_get_subscription(self):\n pass", "def test_get_subscriptions(self):\n pass", "def test_create_subscription(self):\n pass", "def get_subscription(self):\n return self.request({\n 'path': '/' + UUID + '/subscription'})", "def subscription(self):\n return self._subscription", "def subscription(self):\n return self._subscription", "def test_process_subscriptions(self):\n pass", "def touch_subscription(self):\n try:\n # check if subscription exists\n sub = self.client.get_subscription(subscription=self.subscription_path)\n\n except NotFound:\n self._create_subscription()\n\n else:\n self.topic_path = sub.topic\n print(f\"Subscription exists: {self.subscription_path}\")\n print(f\"Connected to topic: {self.topic_path}\")", "def post_get_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def handle_create(self):\n subscription = self.client().subscription(\n self.properties[self.QUEUE_NAME],\n subscriber=self.properties[self.SUBSCRIBER],\n ttl=self.properties[self.TTL],\n options=self.properties[self.OPTIONS]\n )\n self.resource_id_set(subscription.id)", "def subscribe(self, subject):\n pass", "def _subscribe(self):\n if self.subscribed:\n return False\n return {}", "def subscriptions(self):\r\n return subs.Subscriptions(self)", "def subscription(self, subscription):\n\n self._subscription = subscription", "def post_update_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def test_issue_subscriptions(self):\n pass", "def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def _subscribe(self):\n self.subscribed = True\n self.subscribe_date = now()\n self.unsubscribed = False", "def subscription(bot, update):\n chat_id = update.message.chat_id\n bot.sendMessage(chat_id=chat_id, text=SUBSCRIPTION_MSG, parse_mode='markdown', \n disable_web_page_preview=True)\n \n mp.track(get_user_info(chat_id)['PID'], 'Checked Subscription')", "def test_load_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def subscription(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"subscription\")", "def subscription(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"subscription\")", "def test_aws_service_api_validate_subscription_post(self):\n pass", "def subscriptions(self):\r\n return v3.Subscriptions(self)", "def subscriptions(self) -> pulumi.Output[Optional[Sequence['outputs.ResourceIdResponse']]]:\n return pulumi.get(self, \"subscriptions\")", "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def on_subscribe( client, userdata, mid, granted_qos ):\n logging.info( \"Topic successfully subcribed with QoS: %s\" %granted_qos )", "def post_create_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def subscribe(self, **subscription_request):\n return self.subscribe_impl(mode='subscribe', **subscription_request)", "def 
subscriptions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourceIdArgs']]]]:\n return pulumi.get(self, \"subscriptions\")", "def no_save_individual_subscription(sender, instance, **kwargs):\n try:\n Subscription.objects.get(pk=instance.pk) # looking if the subscription exist, if the case, we assume here is updating active status or email status\n except:\n if instance.user is not None:\n subs_ids = instance.user.groups.values_list('subscription')\n for sub in subs_ids:\n if None not in sub:\n alarm = Subscription.objects.get(id=sub[0]).alarm\n if alarm == instance.alarm:\n raise ValidationError('The user is subscribed to the same alarm for a group')\n\n subs = Subscription.objects.filter(user=instance.user)\n for sub in subs:\n if sub.alarm == instance.alarm:\n raise ValidationError('The user is subscribed to this alarm')", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_issue_add_subscription(self):\n pass", "def create_subscription(self,\n body):\n\n return super().new_api_call_builder.request(\n RequestBuilder().server('default')\n .path('/v2/subscriptions')\n .http_method(HttpMethodEnum.POST)\n .header_param(Parameter()\n .key('Content-Type')\n .value('application/json'))\n .body_param(Parameter()\n .value(body))\n .header_param(Parameter()\n .key('accept')\n .value('application/json'))\n .body_serializer(APIHelper.json_serialize)\n .auth(Single('global'))\n ).response(\n ResponseHandler()\n .deserializer(APIHelper.json_deserialize)\n .is_api_response(True)\n .convertor(ApiResponse.create)\n ).execute()", "def subscribe(self, inst):\r\n if inst not in self._subscribers:\r\n self._subscribers.append(inst)\r\n vprint(\"{} is subscribed to {}\".format(inst.name, self.name))", "def post(self):\n data = request.json\n return new_subscription(data=data)", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_update_subscription_template(self):\n pass", "def test_delete_subscription(self):\n pass", "def get_subscription(self):\n if not hasattr(self, '_subscription'):\n self._subscription = self.admin.subscriptions.select_related('plan').get_overlapping(\n self.admin_id, DateRange(self.period, self.period_end, bounds='[]'))\n return self._subscription", "def get_subscriptions(self):\n return {}", "def subscribe(self):\n res = self._subscribe()\n if res is not None:\n self._subscribed = True\n return res", "def subscribe(receiver):", "def subscribe(receiver):", "def subscribe(receiver):", "def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def subscribe(self, subscription):\n try:\n if isinstance(subscription, Subscription):\n sub = Subscribe(subscription, self.__pool, self.myAddress)\n self.send(self.__pool, sub)\n except Exception:\n handle_actor_system_fail()", "def subscriptions(self):\r\n return subs.AccountSubscriptions(self)", "def set_subscription(self, value):\n self.pub_socket.setsockopt(zmq.SUBSCRIBE, value)", "def add_subscription(self):\n schema = schemas.load(schemas.Subscription, self.request)\n subscription = self.customer.add_subscription(**schema)\n self.request.db.flush()\n self.request.response.status_int = 201\n return {'abonnement': subscription}", "def subscribe(self):\n if not self._subscribed and self._connected:\n if ATTR_STREAM_ID not in self.data:\n msg = self._create_message(strings.SUB_MSG)\n self.write(msg)\n else:\n msg = 
self._create_message(strings.RESUB_MSG)\n self.write(msg)\n self._subscribed = True", "def to_subscription_instance_10(self):\n delivery_params = None # TODO: Implement this\n poll_instances = None # TODO: Implement this\n\n si = tm10.SubscriptionInstance(subscription_id=str(self.subscription_id),\n delivery_parameters=delivery_params,\n poll_instances=poll_instances)\n return si", "def get_subscriptions(self) -> Iterator[\"Subscription\"]:\n yield from self._subscriptions[self.id]", "def subscription(self) -> SubscriptionServiceProxy:\n if self._subscription_services is None:\n self._subscription_services = SubscriptionServiceProxy(self)\n return self._subscription_services", "def post_list_subscriptions(\n self, response: pubsub.ListSubscriptionsResponse\n ) -> pubsub.ListSubscriptionsResponse:\n return response", "def subscribe(self, request: Request) -> Response:\n ids = request.data.get(\"ids\", None)\n session_id = request.data.get(\"session_id\")\n content_type = ContentType.objects.get_for_model(self.get_queryset().model)\n user = request.user if request.user.is_authenticated else get_anonymous_user()\n subscription = Subscription.objects.create(user=user, session_id=session_id)\n\n if ids is None:\n # Subscribe to the whole table.\n subscription.subscribe(\n content_type, [Observer.ALL_IDS], (ChangeType.CREATE, ChangeType.DELETE)\n )\n else:\n # Verify all ids exists and user has permissions to view them.\n for id in ids:\n if not self.user_has_permission(id, request.user):\n raise NotFound(f\"Item {id} does not exist\")\n\n change_types = (ChangeType.UPDATE, ChangeType.DELETE, ChangeType.CREATE)\n subscription.subscribe(content_type, ids, change_types)\n\n resp = {\"subscription_id\": subscription.subscription_id}\n return Response(resp)", "def _pause_subscription(self):\n return {}", "def add_subscription(self, query):\n key = query.key()\n if key not in self.subscriptions:\n self.subscriptions += [key]\n self.put()", "def test_subscribe(self):\n self.service.clientConnected()\n self.service.subscribe(u'url', None)\n pubsubClient = self.service.pubsubClient\n self.assertIn(u'url', pubsubClient.subscriptions)", "def test_list_template_subscriptions(self):\n pass", "def test_update_template_subscription(self):\n pass", "def get_subscription_id(self):\n return self.instance_metadata.subscription_id", "def _async_track_subscription(self, subscription: Subscription) -> None:\n if _is_simple_match(subscription.topic):\n self._simple_subscriptions.setdefault(subscription.topic, []).append(\n subscription\n )\n else:\n self._wildcard_subscriptions.append(subscription)", "def getAllSubscriptions(self):\n return self.request(\n \"getAllSubscriptions\",\n )", "def subscriptions(self):\n if not hasattr(self, '_subscriptions'):\n subscriptions_resource = self.resource.subscriptions\n self._subscriptions = Subscriptions(\n subscriptions_resource, self.client)\n return self._subscriptions", "def test_get_template_subscription(self):\n pass", "def list_subscriptions(self):\n return {'abonnementen': self.customer.abonnementen}", "def get_subscriptions(self):\n return self.subscriptions.all()", "def GetSubscription(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def _unsubscribe(self):\n if not self.subscribed:\n return False\n return {}", "def toggle_subscription(self):\n user = self.context['request'].user\n # pylint: disable=no-member\n profile = 
UserProfile.objects.get(\n user=user)\n club = self.context['club']\n\n if club in profile.subscriptions.all():\n club.subscribed_users.remove(profile)\n else:\n club.subscribed_users.add(profile)", "def __call__(\n self,\n request: pubsub.Subscription,\n *,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Optional[float] = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> pubsub.Subscription:\n\n http_options: List[Dict[str, str]] = [\n {\n \"method\": \"put\",\n \"uri\": \"/v1/{name=projects/*/subscriptions/*}\",\n \"body\": \"*\",\n },\n ]\n request, metadata = self._interceptor.pre_create_subscription(\n request, metadata\n )\n pb_request = pubsub.Subscription.pb(request)\n transcoded_request = path_template.transcode(http_options, pb_request)\n\n # Jsonify the request body\n\n body = json_format.MessageToJson(\n transcoded_request[\"body\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n uri = transcoded_request[\"uri\"]\n method = transcoded_request[\"method\"]\n\n # Jsonify the query params\n query_params = json.loads(\n json_format.MessageToJson(\n transcoded_request[\"query_params\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n )\n query_params.update(self._get_unset_required_fields(query_params))\n\n query_params[\"$alt\"] = \"json;enum-encoding=int\"\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = getattr(self._session, method)(\n \"{host}{uri}\".format(host=self._host, uri=uri),\n timeout=timeout,\n headers=headers,\n params=rest_helpers.flatten_query_params(query_params, strict=True),\n data=body,\n )\n\n # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)\n\n # Return the response\n resp = pubsub.Subscription()\n pb_resp = pubsub.Subscription.pb(resp)\n\n json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)\n resp = self._interceptor.post_create_subscription(resp)\n return resp", "def _create_sub(name, rostype, topic_callback, *args, **kwargs):\n # counting subscriber instance per topic name\n if name in TopicBack.sub_instance_count.keys():\n TopicBack.sub_instance_count[name] += 1\n else:\n TopicBack.sub_instance_count[name] = 1\n\n return rospy.Subscriber(name, rostype, topic_callback, *args, **kwargs)", "def delete_individual_subscriptions_for_grupal_subscription(sender, instance, **kwargs):\n if instance.group is not None: # Only for group subscription creation\n users = User.objects.filter(groups__name=instance.group)\n subs = Subscription.objects.filter(user__in=users)\n for sub in subs:\n if sub.alarm == instance.alarm:\n print('%s deleted' % sub)\n sub.delete()", "def test_create_subscription_template(self):\n pass", "def test_issue_delete_subscription(self):\n pass", "def to_subscription_instance_11(self):\n subscription_params = tm11.SubscriptionParameters(response_type=self.response_type,\n content_bindings=[str(x) for x in\n self.supported_content.all()])\n\n if self.query:\n subscription_params.query = self.query.to_query_11()\n\n push_params = None # TODO: Implement this\n poll_instances = None # TODO: Implement this\n si = tm11.SubscriptionInstance(subscription_id=str(self.subscription_id),\n status=self.status,\n subscription_parameters=subscription_params,\n push_parameters=push_params,\n poll_instances=poll_instances)\n return si", "def subscribe(self, 
item_name):\n if item_name == ITEM_NAME:\n self.subscribed = item_name\n else:\n # Only one item for a unique chat room is managed.\n raise SubscribeError(\"No such item\")", "def __init__(self, data):\n super(OptionsChainSubscriptionCreate, self).__init__()\n self.data = data", "def test_get_subscription_template(self):\n pass", "def enable_subscription():\n client = KConsumer(config=subscriber_config)\n counter = 0\n while 1:\n data = client.consume()\n if data:\n print(\"Received Data\", counter)\n class_label = inference_on_data(data.value)\n publish_response(class_label)", "def subscribeConsumer(consumer):", "def __init__(self, ContextId, ReferenceId, data):\n super(OptionsChainSubscriptionModify, self).__init__(\n ReferenceId=ReferenceId,\n ContextId=ContextId)\n self.data = data", "def getSubscriptions(self):\n\n address = self.getAddress()\n if address is None:\n return []\n else:\n return [\n \"shellies/announce\",\n \"{}/online\".format(address),\n \"{}/emeter/{}/energy\".format(address, self.getChannel()),\n \"{}/emeter/{}/returned_energy\".format(address, self.getChannel()),\n \"{}/emeter/{}/power\".format(address, self.getChannel()),\n \"{}/emeter/{}/reactive_power\".format(address, self.getChannel()),\n \"{}/emeter/{}/voltage\".format(address, self.getChannel()),\n \"{}/emeter/{}/total\".format(address, self.getChannel()),\n \"{}/emeter/{}/total_returned\".format(address, self.getChannel())\n ]", "def subscribe_command(shared, chat, message, args):\n subs = shared[\"subs\"]\n subs.append(chat.id)\n shared[\"subs\"] = subs", "def __init__(self, ContextId, ReferenceId):\n super(OptionsChainSubscriptionRemove, self).__init__(\n ReferenceId=ReferenceId,\n ContextId=ContextId)", "def subscribePost() -> object:\n log = logging.getLogger(__name__)\n db = Db()\n\n body = request.get_json()\n\n if body is None:\n return jsonify({\"error\": \"json body is required\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n if not('datasetId') in body:\n return jsonify({\"error\": \"datasetId is a required attribute\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n if not('notificationUrl') in body:\n return jsonify({\"error\": \"notificationUrl is a required attribute\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n\n subscription = db.Subscriptions(\n datasetId=body['datasetId'],\n notificationUrl=body['notificationUrl']\n )\n\n subscription.save()\n\n subscription = json.loads(subscription.to_json())\n subscription['id'] = subscription['_id'][\"$oid\"]\n subscription.pop(\"_id\")\n log.debug(\"subscription created\")\n\n return jsonify(subscription), HTTPStatus.CREATED", "def _subscribe(self, sub_type: str, sub_version: str, condition: dict, callback) -> str:\n self.__logger.debug(f'subscribe to {sub_type} version {sub_version} with condition {condition}')\n data = {\n 'type': sub_type,\n 'version': sub_version,\n 'condition': condition,\n 'transport': {\n 'method': 'webhook',\n 'callback': f'{self.callback_url}/callback',\n 'secret': self.secret\n }\n }\n r_data = self.__api_post_request(TWITCH_API_BASE_URL + 'eventsub/subscriptions', data=data)\n result = r_data.json()\n error = result.get('error')\n if r_data.status_code == 500:\n raise TwitchBackendException(error)\n if error is not None:\n if error.lower() == 'conflict':\n raise EventSubSubscriptionConflict(result.get('message', ''))\n raise EventSubSubscriptionError(result.get('message'))\n sub_id = result['data'][0]['id']\n self.__add_callback(sub_id, callback)\n if self.wait_for_subscription_confirm:\n timeout = datetime.datetime.utcnow() + 
datetime.timedelta(\n seconds=self.wait_for_subscription_confirm_timeout)\n while timeout >= datetime.datetime.utcnow():\n if self.__callbacks[sub_id]['active']:\n return sub_id\n asyncio.get_event_loop().run_until_complete(asyncio.sleep(0.01))\n self.__callbacks.pop(sub_id, None)\n raise EventSubSubscriptionTimeout()\n return sub_id", "def pause_subscription(self):\n res = self._pause_subscription()\n self._subscribed = not self.subscribed\n return res", "def subscription(self, subscription):\n if subscription is None:\n raise ValueError(\"Invalid value for `subscription`, must not be `None`\")\n\n self._subscription = subscription", "def test_list_pending_template_subscriptions(self):\n pass", "def subscribe(receiver, updateInterval=10):", "def getsubscriptions(self):\n subs = {}\n for sub in self._subscriptions.values():\n subs[sub.ID] = sub.asTuple()\n return subs", "def test_existing_subscriptions_autosubscription(self) -> None:\n stream_name = \"new_public_stream\"\n cordelia = self.example_user(\"cordelia\")\n self.common_subscribe_to_streams(cordelia, [stream_name], invite_only=False)\n result = self.client_post(\n \"/json/subscriptions/exists\", {\"stream\": stream_name, \"autosubscribe\": \"false\"}\n )\n response_dict = self.assert_json_success(result)\n self.assertIn(\"subscribed\", response_dict)\n self.assertFalse(response_dict[\"subscribed\"])\n\n result = self.client_post(\n \"/json/subscriptions/exists\", {\"stream\": stream_name, \"autosubscribe\": \"true\"}\n )\n response_dict = self.assert_json_success(result)\n self.assertIn(\"subscribed\", response_dict)\n self.assertTrue(response_dict)", "def get_subscriptions_to_self(self):\n return self._roster.get_my_subscribers()\n return self._roster.get_my_subscribers()" ]
[ "0.6766924", "0.6723815", "0.67114395", "0.66165465", "0.6594774", "0.65839344", "0.6522862", "0.6443223", "0.64325064", "0.63409066", "0.6316549", "0.6316549", "0.63120353", "0.62725765", "0.62641907", "0.62356377", "0.6234742", "0.6200784", "0.6197641", "0.6175624", "0.61567813", "0.6144675", "0.6142669", "0.6117894", "0.60898656", "0.6045312", "0.6037747", "0.6037747", "0.60090065", "0.6006827", "0.6002745", "0.5973457", "0.59708565", "0.5938422", "0.59216875", "0.58956856", "0.5884083", "0.5878568", "0.58764887", "0.5864635", "0.58512646", "0.58422667", "0.5841695", "0.5840756", "0.58291566", "0.5818745", "0.5813365", "0.5799376", "0.5790863", "0.5790863", "0.5790863", "0.5789006", "0.5766831", "0.5760116", "0.5756521", "0.57498324", "0.5745291", "0.5712999", "0.5696784", "0.56962174", "0.56935626", "0.5683641", "0.5660921", "0.56607026", "0.5641244", "0.5619445", "0.5611283", "0.5609982", "0.5598294", "0.55964607", "0.5595865", "0.5592494", "0.55910856", "0.5585468", "0.55849886", "0.55776924", "0.55707777", "0.55619156", "0.55364245", "0.55118704", "0.55088645", "0.5499071", "0.5464195", "0.54575783", "0.5453018", "0.5450689", "0.5440342", "0.5438776", "0.5432882", "0.5420828", "0.5404704", "0.54024047", "0.5397794", "0.5397691", "0.5393936", "0.5392811", "0.5388118", "0.53736436", "0.5372243", "0.5370428", "0.5369889" ]
0.0
-1
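The "document" field of the row that just ended is the async, options-taking variant of the renewal-price call (describe_renewal_price_with_options_async). As a purely illustrative sketch, and not part of any dataset row, the snippet below shows one plausible way that method could be invoked. The package import paths, the client configuration values, the endpoint, and the instance ID are assumptions based on the usual layout of the generated Alibaba Cloud Python SDKs; only the request model, the RuntimeOptions object, and the method signature come from the code shown above.

import asyncio

from alibabacloud_dds20151201.client import Client              # assumed package/module name
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util import models as util_models


async def main() -> None:
    # Placeholder credentials and endpoint; real values come from your account and region.
    config = open_api_models.Config(
        access_key_id='<ACCESS_KEY_ID>',
        access_key_secret='<ACCESS_KEY_SECRET>',
        endpoint='mongodb.aliyuncs.com',
    )
    client = Client(config)

    # DescribeRenewalPriceRequest is the request model consumed by the method above;
    # dbinstance_id names the subscription instance whose renewal price is queried.
    request = dds_20151201_models.DescribeRenewalPriceRequest(
        dbinstance_id='dds-bp1xxxxxxxxxxxxx',  # placeholder instance ID
    )
    runtime = util_models.RuntimeOptions()

    response = await client.describe_renewal_price_with_options_async(request, runtime)
    print(response.body)  # generated response models expose the parsed payload as .body


if __name__ == '__main__':
    asyncio.run(main())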
This operation is applicable to subscription instances.
def describe_renewal_price( self, request: dds_20151201_models.DescribeRenewalPriceRequest, ) -> dds_20151201_models.DescribeRenewalPriceResponse: runtime = util_models.RuntimeOptions() return self.describe_renewal_price_with_options(request, runtime)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def subscription(self):\r\n return SubscriptionResource(self)", "def test_update_subscription(self):\n pass", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def subscription(self, uuid):\r\n return subs.Subscription(self, uuid)", "def test_get_subscription(self):\n pass", "def test_get_subscriptions(self):\n pass", "def test_create_subscription(self):\n pass", "def get_subscription(self):\n return self.request({\n 'path': '/' + UUID + '/subscription'})", "def subscription(self):\n return self._subscription", "def subscription(self):\n return self._subscription", "def test_process_subscriptions(self):\n pass", "def touch_subscription(self):\n try:\n # check if subscription exists\n sub = self.client.get_subscription(subscription=self.subscription_path)\n\n except NotFound:\n self._create_subscription()\n\n else:\n self.topic_path = sub.topic\n print(f\"Subscription exists: {self.subscription_path}\")\n print(f\"Connected to topic: {self.topic_path}\")", "def post_get_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def handle_create(self):\n subscription = self.client().subscription(\n self.properties[self.QUEUE_NAME],\n subscriber=self.properties[self.SUBSCRIBER],\n ttl=self.properties[self.TTL],\n options=self.properties[self.OPTIONS]\n )\n self.resource_id_set(subscription.id)", "def subscribe(self, subject):\n pass", "def _subscribe(self):\n if self.subscribed:\n return False\n return {}", "def subscriptions(self):\r\n return subs.Subscriptions(self)", "def subscription(self, subscription):\n\n self._subscription = subscription", "def post_update_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def test_issue_subscriptions(self):\n pass", "def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def _subscribe(self):\n self.subscribed = True\n self.subscribe_date = now()\n self.unsubscribed = False", "def subscription(bot, update):\n chat_id = update.message.chat_id\n bot.sendMessage(chat_id=chat_id, text=SUBSCRIPTION_MSG, parse_mode='markdown', \n disable_web_page_preview=True)\n \n mp.track(get_user_info(chat_id)['PID'], 'Checked Subscription')", "def test_load_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def subscription(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"subscription\")", "def subscription(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"subscription\")", "def test_aws_service_api_validate_subscription_post(self):\n pass", "def subscriptions(self):\r\n return v3.Subscriptions(self)", "def subscriptions(self) -> pulumi.Output[Optional[Sequence['outputs.ResourceIdResponse']]]:\n return pulumi.get(self, \"subscriptions\")", "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def on_subscribe( client, userdata, mid, granted_qos ):\n logging.info( \"Topic successfully subcribed with QoS: %s\" %granted_qos )", "def post_create_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def subscribe(self, **subscription_request):\n return self.subscribe_impl(mode='subscribe', **subscription_request)", "def 
subscriptions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourceIdArgs']]]]:\n return pulumi.get(self, \"subscriptions\")", "def no_save_individual_subscription(sender, instance, **kwargs):\n try:\n Subscription.objects.get(pk=instance.pk) # looking if the subscription exist, if the case, we assume here is updating active status or email status\n except:\n if instance.user is not None:\n subs_ids = instance.user.groups.values_list('subscription')\n for sub in subs_ids:\n if None not in sub:\n alarm = Subscription.objects.get(id=sub[0]).alarm\n if alarm == instance.alarm:\n raise ValidationError('The user is subscribed to the same alarm for a group')\n\n subs = Subscription.objects.filter(user=instance.user)\n for sub in subs:\n if sub.alarm == instance.alarm:\n raise ValidationError('The user is subscribed to this alarm')", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_issue_add_subscription(self):\n pass", "def create_subscription(self,\n body):\n\n return super().new_api_call_builder.request(\n RequestBuilder().server('default')\n .path('/v2/subscriptions')\n .http_method(HttpMethodEnum.POST)\n .header_param(Parameter()\n .key('Content-Type')\n .value('application/json'))\n .body_param(Parameter()\n .value(body))\n .header_param(Parameter()\n .key('accept')\n .value('application/json'))\n .body_serializer(APIHelper.json_serialize)\n .auth(Single('global'))\n ).response(\n ResponseHandler()\n .deserializer(APIHelper.json_deserialize)\n .is_api_response(True)\n .convertor(ApiResponse.create)\n ).execute()", "def subscribe(self, inst):\r\n if inst not in self._subscribers:\r\n self._subscribers.append(inst)\r\n vprint(\"{} is subscribed to {}\".format(inst.name, self.name))", "def post(self):\n data = request.json\n return new_subscription(data=data)", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_update_subscription_template(self):\n pass", "def test_delete_subscription(self):\n pass", "def get_subscription(self):\n if not hasattr(self, '_subscription'):\n self._subscription = self.admin.subscriptions.select_related('plan').get_overlapping(\n self.admin_id, DateRange(self.period, self.period_end, bounds='[]'))\n return self._subscription", "def get_subscriptions(self):\n return {}", "def subscribe(self):\n res = self._subscribe()\n if res is not None:\n self._subscribed = True\n return res", "def subscribe(receiver):", "def subscribe(receiver):", "def subscribe(receiver):", "def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def subscribe(self, subscription):\n try:\n if isinstance(subscription, Subscription):\n sub = Subscribe(subscription, self.__pool, self.myAddress)\n self.send(self.__pool, sub)\n except Exception:\n handle_actor_system_fail()", "def subscriptions(self):\r\n return subs.AccountSubscriptions(self)", "def set_subscription(self, value):\n self.pub_socket.setsockopt(zmq.SUBSCRIBE, value)", "def add_subscription(self):\n schema = schemas.load(schemas.Subscription, self.request)\n subscription = self.customer.add_subscription(**schema)\n self.request.db.flush()\n self.request.response.status_int = 201\n return {'abonnement': subscription}", "def subscribe(self):\n if not self._subscribed and self._connected:\n if ATTR_STREAM_ID not in self.data:\n msg = self._create_message(strings.SUB_MSG)\n self.write(msg)\n else:\n msg = 
self._create_message(strings.RESUB_MSG)\n self.write(msg)\n self._subscribed = True", "def to_subscription_instance_10(self):\n delivery_params = None # TODO: Implement this\n poll_instances = None # TODO: Implement this\n\n si = tm10.SubscriptionInstance(subscription_id=str(self.subscription_id),\n delivery_parameters=delivery_params,\n poll_instances=poll_instances)\n return si", "def get_subscriptions(self) -> Iterator[\"Subscription\"]:\n yield from self._subscriptions[self.id]", "def subscription(self) -> SubscriptionServiceProxy:\n if self._subscription_services is None:\n self._subscription_services = SubscriptionServiceProxy(self)\n return self._subscription_services", "def post_list_subscriptions(\n self, response: pubsub.ListSubscriptionsResponse\n ) -> pubsub.ListSubscriptionsResponse:\n return response", "def subscribe(self, request: Request) -> Response:\n ids = request.data.get(\"ids\", None)\n session_id = request.data.get(\"session_id\")\n content_type = ContentType.objects.get_for_model(self.get_queryset().model)\n user = request.user if request.user.is_authenticated else get_anonymous_user()\n subscription = Subscription.objects.create(user=user, session_id=session_id)\n\n if ids is None:\n # Subscribe to the whole table.\n subscription.subscribe(\n content_type, [Observer.ALL_IDS], (ChangeType.CREATE, ChangeType.DELETE)\n )\n else:\n # Verify all ids exists and user has permissions to view them.\n for id in ids:\n if not self.user_has_permission(id, request.user):\n raise NotFound(f\"Item {id} does not exist\")\n\n change_types = (ChangeType.UPDATE, ChangeType.DELETE, ChangeType.CREATE)\n subscription.subscribe(content_type, ids, change_types)\n\n resp = {\"subscription_id\": subscription.subscription_id}\n return Response(resp)", "def _pause_subscription(self):\n return {}", "def add_subscription(self, query):\n key = query.key()\n if key not in self.subscriptions:\n self.subscriptions += [key]\n self.put()", "def test_subscribe(self):\n self.service.clientConnected()\n self.service.subscribe(u'url', None)\n pubsubClient = self.service.pubsubClient\n self.assertIn(u'url', pubsubClient.subscriptions)", "def test_list_template_subscriptions(self):\n pass", "def test_update_template_subscription(self):\n pass", "def get_subscription_id(self):\n return self.instance_metadata.subscription_id", "def _async_track_subscription(self, subscription: Subscription) -> None:\n if _is_simple_match(subscription.topic):\n self._simple_subscriptions.setdefault(subscription.topic, []).append(\n subscription\n )\n else:\n self._wildcard_subscriptions.append(subscription)", "def getAllSubscriptions(self):\n return self.request(\n \"getAllSubscriptions\",\n )", "def subscriptions(self):\n if not hasattr(self, '_subscriptions'):\n subscriptions_resource = self.resource.subscriptions\n self._subscriptions = Subscriptions(\n subscriptions_resource, self.client)\n return self._subscriptions", "def test_get_template_subscription(self):\n pass", "def list_subscriptions(self):\n return {'abonnementen': self.customer.abonnementen}", "def GetSubscription(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_subscriptions(self):\n return self.subscriptions.all()", "def _unsubscribe(self):\n if not self.subscribed:\n return False\n return {}", "def toggle_subscription(self):\n user = self.context['request'].user\n # pylint: disable=no-member\n profile = 
UserProfile.objects.get(\n user=user)\n club = self.context['club']\n\n if club in profile.subscriptions.all():\n club.subscribed_users.remove(profile)\n else:\n club.subscribed_users.add(profile)", "def __call__(\n self,\n request: pubsub.Subscription,\n *,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Optional[float] = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> pubsub.Subscription:\n\n http_options: List[Dict[str, str]] = [\n {\n \"method\": \"put\",\n \"uri\": \"/v1/{name=projects/*/subscriptions/*}\",\n \"body\": \"*\",\n },\n ]\n request, metadata = self._interceptor.pre_create_subscription(\n request, metadata\n )\n pb_request = pubsub.Subscription.pb(request)\n transcoded_request = path_template.transcode(http_options, pb_request)\n\n # Jsonify the request body\n\n body = json_format.MessageToJson(\n transcoded_request[\"body\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n uri = transcoded_request[\"uri\"]\n method = transcoded_request[\"method\"]\n\n # Jsonify the query params\n query_params = json.loads(\n json_format.MessageToJson(\n transcoded_request[\"query_params\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n )\n query_params.update(self._get_unset_required_fields(query_params))\n\n query_params[\"$alt\"] = \"json;enum-encoding=int\"\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = getattr(self._session, method)(\n \"{host}{uri}\".format(host=self._host, uri=uri),\n timeout=timeout,\n headers=headers,\n params=rest_helpers.flatten_query_params(query_params, strict=True),\n data=body,\n )\n\n # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)\n\n # Return the response\n resp = pubsub.Subscription()\n pb_resp = pubsub.Subscription.pb(resp)\n\n json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)\n resp = self._interceptor.post_create_subscription(resp)\n return resp", "def _create_sub(name, rostype, topic_callback, *args, **kwargs):\n # counting subscriber instance per topic name\n if name in TopicBack.sub_instance_count.keys():\n TopicBack.sub_instance_count[name] += 1\n else:\n TopicBack.sub_instance_count[name] = 1\n\n return rospy.Subscriber(name, rostype, topic_callback, *args, **kwargs)", "def delete_individual_subscriptions_for_grupal_subscription(sender, instance, **kwargs):\n if instance.group is not None: # Only for group subscription creation\n users = User.objects.filter(groups__name=instance.group)\n subs = Subscription.objects.filter(user__in=users)\n for sub in subs:\n if sub.alarm == instance.alarm:\n print('%s deleted' % sub)\n sub.delete()", "def test_create_subscription_template(self):\n pass", "def test_issue_delete_subscription(self):\n pass", "def to_subscription_instance_11(self):\n subscription_params = tm11.SubscriptionParameters(response_type=self.response_type,\n content_bindings=[str(x) for x in\n self.supported_content.all()])\n\n if self.query:\n subscription_params.query = self.query.to_query_11()\n\n push_params = None # TODO: Implement this\n poll_instances = None # TODO: Implement this\n si = tm11.SubscriptionInstance(subscription_id=str(self.subscription_id),\n status=self.status,\n subscription_parameters=subscription_params,\n push_parameters=push_params,\n poll_instances=poll_instances)\n return si", "def subscribe(self, 
item_name):\n if item_name == ITEM_NAME:\n self.subscribed = item_name\n else:\n # Only one item for a unique chat room is managed.\n raise SubscribeError(\"No such item\")", "def __init__(self, data):\n super(OptionsChainSubscriptionCreate, self).__init__()\n self.data = data", "def test_get_subscription_template(self):\n pass", "def enable_subscription():\n client = KConsumer(config=subscriber_config)\n counter = 0\n while 1:\n data = client.consume()\n if data:\n print(\"Received Data\", counter)\n class_label = inference_on_data(data.value)\n publish_response(class_label)", "def subscribeConsumer(consumer):", "def __init__(self, ContextId, ReferenceId, data):\n super(OptionsChainSubscriptionModify, self).__init__(\n ReferenceId=ReferenceId,\n ContextId=ContextId)\n self.data = data", "def getSubscriptions(self):\n\n address = self.getAddress()\n if address is None:\n return []\n else:\n return [\n \"shellies/announce\",\n \"{}/online\".format(address),\n \"{}/emeter/{}/energy\".format(address, self.getChannel()),\n \"{}/emeter/{}/returned_energy\".format(address, self.getChannel()),\n \"{}/emeter/{}/power\".format(address, self.getChannel()),\n \"{}/emeter/{}/reactive_power\".format(address, self.getChannel()),\n \"{}/emeter/{}/voltage\".format(address, self.getChannel()),\n \"{}/emeter/{}/total\".format(address, self.getChannel()),\n \"{}/emeter/{}/total_returned\".format(address, self.getChannel())\n ]", "def subscribe_command(shared, chat, message, args):\n subs = shared[\"subs\"]\n subs.append(chat.id)\n shared[\"subs\"] = subs", "def __init__(self, ContextId, ReferenceId):\n super(OptionsChainSubscriptionRemove, self).__init__(\n ReferenceId=ReferenceId,\n ContextId=ContextId)", "def _subscribe(self, sub_type: str, sub_version: str, condition: dict, callback) -> str:\n self.__logger.debug(f'subscribe to {sub_type} version {sub_version} with condition {condition}')\n data = {\n 'type': sub_type,\n 'version': sub_version,\n 'condition': condition,\n 'transport': {\n 'method': 'webhook',\n 'callback': f'{self.callback_url}/callback',\n 'secret': self.secret\n }\n }\n r_data = self.__api_post_request(TWITCH_API_BASE_URL + 'eventsub/subscriptions', data=data)\n result = r_data.json()\n error = result.get('error')\n if r_data.status_code == 500:\n raise TwitchBackendException(error)\n if error is not None:\n if error.lower() == 'conflict':\n raise EventSubSubscriptionConflict(result.get('message', ''))\n raise EventSubSubscriptionError(result.get('message'))\n sub_id = result['data'][0]['id']\n self.__add_callback(sub_id, callback)\n if self.wait_for_subscription_confirm:\n timeout = datetime.datetime.utcnow() + datetime.timedelta(\n seconds=self.wait_for_subscription_confirm_timeout)\n while timeout >= datetime.datetime.utcnow():\n if self.__callbacks[sub_id]['active']:\n return sub_id\n asyncio.get_event_loop().run_until_complete(asyncio.sleep(0.01))\n self.__callbacks.pop(sub_id, None)\n raise EventSubSubscriptionTimeout()\n return sub_id", "def subscribePost() -> object:\n log = logging.getLogger(__name__)\n db = Db()\n\n body = request.get_json()\n\n if body is None:\n return jsonify({\"error\": \"json body is required\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n if not('datasetId') in body:\n return jsonify({\"error\": \"datasetId is a required attribute\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n if not('notificationUrl') in body:\n return jsonify({\"error\": \"notificationUrl is a required attribute\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n\n subscription = db.Subscriptions(\n 
datasetId=body['datasetId'],\n notificationUrl=body['notificationUrl']\n )\n\n subscription.save()\n\n subscription = json.loads(subscription.to_json())\n subscription['id'] = subscription['_id'][\"$oid\"]\n subscription.pop(\"_id\")\n log.debug(\"subscription created\")\n\n return jsonify(subscription), HTTPStatus.CREATED", "def pause_subscription(self):\n res = self._pause_subscription()\n self._subscribed = not self.subscribed\n return res", "def subscription(self, subscription):\n if subscription is None:\n raise ValueError(\"Invalid value for `subscription`, must not be `None`\")\n\n self._subscription = subscription", "def test_list_pending_template_subscriptions(self):\n pass", "def subscribe(receiver, updateInterval=10):", "def getsubscriptions(self):\n subs = {}\n for sub in self._subscriptions.values():\n subs[sub.ID] = sub.asTuple()\n return subs", "def test_existing_subscriptions_autosubscription(self) -> None:\n stream_name = \"new_public_stream\"\n cordelia = self.example_user(\"cordelia\")\n self.common_subscribe_to_streams(cordelia, [stream_name], invite_only=False)\n result = self.client_post(\n \"/json/subscriptions/exists\", {\"stream\": stream_name, \"autosubscribe\": \"false\"}\n )\n response_dict = self.assert_json_success(result)\n self.assertIn(\"subscribed\", response_dict)\n self.assertFalse(response_dict[\"subscribed\"])\n\n result = self.client_post(\n \"/json/subscriptions/exists\", {\"stream\": stream_name, \"autosubscribe\": \"true\"}\n )\n response_dict = self.assert_json_success(result)\n self.assertIn(\"subscribed\", response_dict)\n self.assertTrue(response_dict)", "def get_subscriptions_to_self(self):\n return self._roster.get_my_subscribers()\n return self._roster.get_my_subscribers()" ]
[ "0.6768464", "0.6724986", "0.67119396", "0.6617272", "0.659594", "0.6585656", "0.65232456", "0.6443254", "0.6432979", "0.6342068", "0.6316182", "0.6316182", "0.63120866", "0.62734044", "0.6266068", "0.6236521", "0.6234891", "0.6200018", "0.6196757", "0.61762744", "0.61590755", "0.6145037", "0.61427397", "0.6117785", "0.60913867", "0.6046587", "0.60381734", "0.60381734", "0.60095227", "0.60068405", "0.6002366", "0.5974324", "0.5970992", "0.59404236", "0.5922197", "0.58950824", "0.58852506", "0.58791226", "0.58771014", "0.5865434", "0.5850745", "0.5843618", "0.5842252", "0.5842233", "0.5829431", "0.5820333", "0.58125776", "0.57993525", "0.57912076", "0.57912076", "0.57912076", "0.57897145", "0.57676", "0.57596546", "0.5758095", "0.57508147", "0.5745929", "0.57144934", "0.5696573", "0.5695495", "0.56950444", "0.5684795", "0.56609845", "0.5660792", "0.56408226", "0.56199765", "0.5612484", "0.5610517", "0.5599685", "0.5596116", "0.5595185", "0.55934614", "0.55910414", "0.55863214", "0.55849946", "0.55775213", "0.55711204", "0.5563398", "0.5536979", "0.55124307", "0.5510051", "0.5499565", "0.546586", "0.54585266", "0.54536515", "0.5451924", "0.5440518", "0.5438442", "0.54334587", "0.54208875", "0.5406239", "0.5402143", "0.53994006", "0.5399044", "0.53945386", "0.5393975", "0.53886455", "0.53747004", "0.53721243", "0.53708404", "0.5369461" ]
0.0
-1
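For contrast with the sketch above, the row that just ended wraps the same API call behind the synchronous describe_renewal_price convenience method, which constructs the RuntimeOptions internally and delegates to describe_renewal_price_with_options. Again a hedged sketch only, not part of the dataset: the import paths, endpoint, credentials, and instance ID are assumed placeholders.

from alibabacloud_dds20151201.client import Client              # assumed package/module name
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_tea_openapi import models as open_api_models

config = open_api_models.Config(
    access_key_id='<ACCESS_KEY_ID>',           # placeholder credential
    access_key_secret='<ACCESS_KEY_SECRET>',   # placeholder credential
    endpoint='mongodb.aliyuncs.com',           # assumed regional endpoint
)
client = Client(config)

request = dds_20151201_models.DescribeRenewalPriceRequest(
    dbinstance_id='dds-bp1xxxxxxxxxxxxx',      # placeholder subscription instance ID
)

# The sync wrapper builds util_models.RuntimeOptions() itself, so only the request is passed.
response = client.describe_renewal_price(request)
print(response.body)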
This operation is applicable to subscription instances.
async def describe_renewal_price_async( self, request: dds_20151201_models.DescribeRenewalPriceRequest, ) -> dds_20151201_models.DescribeRenewalPriceResponse: runtime = util_models.RuntimeOptions() return await self.describe_renewal_price_with_options_async(request, runtime)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def subscription(self):\r\n return SubscriptionResource(self)", "def test_update_subscription(self):\n pass", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def subscription(self, uuid):\r\n return subs.Subscription(self, uuid)", "def test_get_subscription(self):\n pass", "def test_get_subscriptions(self):\n pass", "def test_create_subscription(self):\n pass", "def get_subscription(self):\n return self.request({\n 'path': '/' + UUID + '/subscription'})", "def subscription(self):\n return self._subscription", "def subscription(self):\n return self._subscription", "def test_process_subscriptions(self):\n pass", "def touch_subscription(self):\n try:\n # check if subscription exists\n sub = self.client.get_subscription(subscription=self.subscription_path)\n\n except NotFound:\n self._create_subscription()\n\n else:\n self.topic_path = sub.topic\n print(f\"Subscription exists: {self.subscription_path}\")\n print(f\"Connected to topic: {self.topic_path}\")", "def post_get_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def handle_create(self):\n subscription = self.client().subscription(\n self.properties[self.QUEUE_NAME],\n subscriber=self.properties[self.SUBSCRIBER],\n ttl=self.properties[self.TTL],\n options=self.properties[self.OPTIONS]\n )\n self.resource_id_set(subscription.id)", "def subscribe(self, subject):\n pass", "def _subscribe(self):\n if self.subscribed:\n return False\n return {}", "def subscriptions(self):\r\n return subs.Subscriptions(self)", "def subscription(self, subscription):\n\n self._subscription = subscription", "def post_update_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def test_issue_subscriptions(self):\n pass", "def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def _subscribe(self):\n self.subscribed = True\n self.subscribe_date = now()\n self.unsubscribed = False", "def subscription(bot, update):\n chat_id = update.message.chat_id\n bot.sendMessage(chat_id=chat_id, text=SUBSCRIPTION_MSG, parse_mode='markdown', \n disable_web_page_preview=True)\n \n mp.track(get_user_info(chat_id)['PID'], 'Checked Subscription')", "def test_load_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def subscription(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"subscription\")", "def subscription(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"subscription\")", "def test_aws_service_api_validate_subscription_post(self):\n pass", "def subscriptions(self):\r\n return v3.Subscriptions(self)", "def subscriptions(self) -> pulumi.Output[Optional[Sequence['outputs.ResourceIdResponse']]]:\n return pulumi.get(self, \"subscriptions\")", "def test_modify_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def on_subscribe( client, userdata, mid, granted_qos ):\n logging.info( \"Topic successfully subcribed with QoS: %s\" %granted_qos )", "def post_create_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response", "def subscribe(self, **subscription_request):\n return self.subscribe_impl(mode='subscribe', **subscription_request)", "def 
subscriptions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourceIdArgs']]]]:\n return pulumi.get(self, \"subscriptions\")", "def no_save_individual_subscription(sender, instance, **kwargs):\n try:\n Subscription.objects.get(pk=instance.pk) # looking if the subscription exist, if the case, we assume here is updating active status or email status\n except:\n if instance.user is not None:\n subs_ids = instance.user.groups.values_list('subscription')\n for sub in subs_ids:\n if None not in sub:\n alarm = Subscription.objects.get(id=sub[0]).alarm\n if alarm == instance.alarm:\n raise ValidationError('The user is subscribed to the same alarm for a group')\n\n subs = Subscription.objects.filter(user=instance.user)\n for sub in subs:\n if sub.alarm == instance.alarm:\n raise ValidationError('The user is subscribed to this alarm')", "def test_create_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_issue_add_subscription(self):\n pass", "def create_subscription(self,\n body):\n\n return super().new_api_call_builder.request(\n RequestBuilder().server('default')\n .path('/v2/subscriptions')\n .http_method(HttpMethodEnum.POST)\n .header_param(Parameter()\n .key('Content-Type')\n .value('application/json'))\n .body_param(Parameter()\n .value(body))\n .header_param(Parameter()\n .key('accept')\n .value('application/json'))\n .body_serializer(APIHelper.json_serialize)\n .auth(Single('global'))\n ).response(\n ResponseHandler()\n .deserializer(APIHelper.json_deserialize)\n .is_api_response(True)\n .convertor(ApiResponse.create)\n ).execute()", "def subscribe(self, inst):\r\n if inst not in self._subscribers:\r\n self._subscribers.append(inst)\r\n vprint(\"{} is subscribed to {}\".format(inst.name, self.name))", "def test_index_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def post(self):\n data = request.json\n return new_subscription(data=data)", "def test_update_subscription_template(self):\n pass", "def test_delete_subscription(self):\n pass", "def get_subscription(self):\n if not hasattr(self, '_subscription'):\n self._subscription = self.admin.subscriptions.select_related('plan').get_overlapping(\n self.admin_id, DateRange(self.period, self.period_end, bounds='[]'))\n return self._subscription", "def get_subscriptions(self):\n return {}", "def subscribe(self):\n res = self._subscribe()\n if res is not None:\n self._subscribed = True\n return res", "def subscribe(receiver):", "def subscribe(receiver):", "def subscribe(receiver):", "def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass", "def subscribe(self, subscription):\n try:\n if isinstance(subscription, Subscription):\n sub = Subscribe(subscription, self.__pool, self.myAddress)\n self.send(self.__pool, sub)\n except Exception:\n handle_actor_system_fail()", "def subscriptions(self):\r\n return subs.AccountSubscriptions(self)", "def set_subscription(self, value):\n self.pub_socket.setsockopt(zmq.SUBSCRIBE, value)", "def add_subscription(self):\n schema = schemas.load(schemas.Subscription, self.request)\n subscription = self.customer.add_subscription(**schema)\n self.request.db.flush()\n self.request.response.status_int = 201\n return {'abonnement': subscription}", "def subscribe(self):\n if not self._subscribed and self._connected:\n if ATTR_STREAM_ID not in self.data:\n msg = self._create_message(strings.SUB_MSG)\n self.write(msg)\n else:\n msg = 
self._create_message(strings.RESUB_MSG)\n self.write(msg)\n self._subscribed = True", "def to_subscription_instance_10(self):\n delivery_params = None # TODO: Implement this\n poll_instances = None # TODO: Implement this\n\n si = tm10.SubscriptionInstance(subscription_id=str(self.subscription_id),\n delivery_parameters=delivery_params,\n poll_instances=poll_instances)\n return si", "def subscription(self) -> SubscriptionServiceProxy:\n if self._subscription_services is None:\n self._subscription_services = SubscriptionServiceProxy(self)\n return self._subscription_services", "def get_subscriptions(self) -> Iterator[\"Subscription\"]:\n yield from self._subscriptions[self.id]", "def post_list_subscriptions(\n self, response: pubsub.ListSubscriptionsResponse\n ) -> pubsub.ListSubscriptionsResponse:\n return response", "def subscribe(self, request: Request) -> Response:\n ids = request.data.get(\"ids\", None)\n session_id = request.data.get(\"session_id\")\n content_type = ContentType.objects.get_for_model(self.get_queryset().model)\n user = request.user if request.user.is_authenticated else get_anonymous_user()\n subscription = Subscription.objects.create(user=user, session_id=session_id)\n\n if ids is None:\n # Subscribe to the whole table.\n subscription.subscribe(\n content_type, [Observer.ALL_IDS], (ChangeType.CREATE, ChangeType.DELETE)\n )\n else:\n # Verify all ids exists and user has permissions to view them.\n for id in ids:\n if not self.user_has_permission(id, request.user):\n raise NotFound(f\"Item {id} does not exist\")\n\n change_types = (ChangeType.UPDATE, ChangeType.DELETE, ChangeType.CREATE)\n subscription.subscribe(content_type, ids, change_types)\n\n resp = {\"subscription_id\": subscription.subscription_id}\n return Response(resp)", "def _pause_subscription(self):\n return {}", "def add_subscription(self, query):\n key = query.key()\n if key not in self.subscriptions:\n self.subscriptions += [key]\n self.put()", "def test_subscribe(self):\n self.service.clientConnected()\n self.service.subscribe(u'url', None)\n pubsubClient = self.service.pubsubClient\n self.assertIn(u'url', pubsubClient.subscriptions)", "def test_list_template_subscriptions(self):\n pass", "def test_update_template_subscription(self):\n pass", "def get_subscription_id(self):\n return self.instance_metadata.subscription_id", "def _async_track_subscription(self, subscription: Subscription) -> None:\n if _is_simple_match(subscription.topic):\n self._simple_subscriptions.setdefault(subscription.topic, []).append(\n subscription\n )\n else:\n self._wildcard_subscriptions.append(subscription)", "def subscriptions(self):\n if not hasattr(self, '_subscriptions'):\n subscriptions_resource = self.resource.subscriptions\n self._subscriptions = Subscriptions(\n subscriptions_resource, self.client)\n return self._subscriptions", "def getAllSubscriptions(self):\n return self.request(\n \"getAllSubscriptions\",\n )", "def test_get_template_subscription(self):\n pass", "def list_subscriptions(self):\n return {'abonnementen': self.customer.abonnementen}", "def GetSubscription(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_subscriptions(self):\n return self.subscriptions.all()", "def _unsubscribe(self):\n if not self.subscribed:\n return False\n return {}", "def toggle_subscription(self):\n user = self.context['request'].user\n # pylint: disable=no-member\n profile = 
UserProfile.objects.get(\n user=user)\n club = self.context['club']\n\n if club in profile.subscriptions.all():\n club.subscribed_users.remove(profile)\n else:\n club.subscribed_users.add(profile)", "def __call__(\n self,\n request: pubsub.Subscription,\n *,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Optional[float] = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> pubsub.Subscription:\n\n http_options: List[Dict[str, str]] = [\n {\n \"method\": \"put\",\n \"uri\": \"/v1/{name=projects/*/subscriptions/*}\",\n \"body\": \"*\",\n },\n ]\n request, metadata = self._interceptor.pre_create_subscription(\n request, metadata\n )\n pb_request = pubsub.Subscription.pb(request)\n transcoded_request = path_template.transcode(http_options, pb_request)\n\n # Jsonify the request body\n\n body = json_format.MessageToJson(\n transcoded_request[\"body\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n uri = transcoded_request[\"uri\"]\n method = transcoded_request[\"method\"]\n\n # Jsonify the query params\n query_params = json.loads(\n json_format.MessageToJson(\n transcoded_request[\"query_params\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n )\n query_params.update(self._get_unset_required_fields(query_params))\n\n query_params[\"$alt\"] = \"json;enum-encoding=int\"\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = getattr(self._session, method)(\n \"{host}{uri}\".format(host=self._host, uri=uri),\n timeout=timeout,\n headers=headers,\n params=rest_helpers.flatten_query_params(query_params, strict=True),\n data=body,\n )\n\n # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)\n\n # Return the response\n resp = pubsub.Subscription()\n pb_resp = pubsub.Subscription.pb(resp)\n\n json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)\n resp = self._interceptor.post_create_subscription(resp)\n return resp", "def _create_sub(name, rostype, topic_callback, *args, **kwargs):\n # counting subscriber instance per topic name\n if name in TopicBack.sub_instance_count.keys():\n TopicBack.sub_instance_count[name] += 1\n else:\n TopicBack.sub_instance_count[name] = 1\n\n return rospy.Subscriber(name, rostype, topic_callback, *args, **kwargs)", "def delete_individual_subscriptions_for_grupal_subscription(sender, instance, **kwargs):\n if instance.group is not None: # Only for group subscription creation\n users = User.objects.filter(groups__name=instance.group)\n subs = Subscription.objects.filter(user__in=users)\n for sub in subs:\n if sub.alarm == instance.alarm:\n print('%s deleted' % sub)\n sub.delete()", "def test_create_subscription_template(self):\n pass", "def test_issue_delete_subscription(self):\n pass", "def to_subscription_instance_11(self):\n subscription_params = tm11.SubscriptionParameters(response_type=self.response_type,\n content_bindings=[str(x) for x in\n self.supported_content.all()])\n\n if self.query:\n subscription_params.query = self.query.to_query_11()\n\n push_params = None # TODO: Implement this\n poll_instances = None # TODO: Implement this\n si = tm11.SubscriptionInstance(subscription_id=str(self.subscription_id),\n status=self.status,\n subscription_parameters=subscription_params,\n push_parameters=push_params,\n poll_instances=poll_instances)\n return si", "def subscribe(self, 
item_name):\n if item_name == ITEM_NAME:\n self.subscribed = item_name\n else:\n # Only one item for a unique chat room is managed.\n raise SubscribeError(\"No such item\")", "def __init__(self, data):\n super(OptionsChainSubscriptionCreate, self).__init__()\n self.data = data", "def test_get_subscription_template(self):\n pass", "def enable_subscription():\n client = KConsumer(config=subscriber_config)\n counter = 0\n while 1:\n data = client.consume()\n if data:\n print(\"Received Data\", counter)\n class_label = inference_on_data(data.value)\n publish_response(class_label)", "def subscribeConsumer(consumer):", "def __init__(self, ContextId, ReferenceId, data):\n super(OptionsChainSubscriptionModify, self).__init__(\n ReferenceId=ReferenceId,\n ContextId=ContextId)\n self.data = data", "def getSubscriptions(self):\n\n address = self.getAddress()\n if address is None:\n return []\n else:\n return [\n \"shellies/announce\",\n \"{}/online\".format(address),\n \"{}/emeter/{}/energy\".format(address, self.getChannel()),\n \"{}/emeter/{}/returned_energy\".format(address, self.getChannel()),\n \"{}/emeter/{}/power\".format(address, self.getChannel()),\n \"{}/emeter/{}/reactive_power\".format(address, self.getChannel()),\n \"{}/emeter/{}/voltage\".format(address, self.getChannel()),\n \"{}/emeter/{}/total\".format(address, self.getChannel()),\n \"{}/emeter/{}/total_returned\".format(address, self.getChannel())\n ]", "def subscribe_command(shared, chat, message, args):\n subs = shared[\"subs\"]\n subs.append(chat.id)\n shared[\"subs\"] = subs", "def __init__(self, ContextId, ReferenceId):\n super(OptionsChainSubscriptionRemove, self).__init__(\n ReferenceId=ReferenceId,\n ContextId=ContextId)", "def _subscribe(self, sub_type: str, sub_version: str, condition: dict, callback) -> str:\n self.__logger.debug(f'subscribe to {sub_type} version {sub_version} with condition {condition}')\n data = {\n 'type': sub_type,\n 'version': sub_version,\n 'condition': condition,\n 'transport': {\n 'method': 'webhook',\n 'callback': f'{self.callback_url}/callback',\n 'secret': self.secret\n }\n }\n r_data = self.__api_post_request(TWITCH_API_BASE_URL + 'eventsub/subscriptions', data=data)\n result = r_data.json()\n error = result.get('error')\n if r_data.status_code == 500:\n raise TwitchBackendException(error)\n if error is not None:\n if error.lower() == 'conflict':\n raise EventSubSubscriptionConflict(result.get('message', ''))\n raise EventSubSubscriptionError(result.get('message'))\n sub_id = result['data'][0]['id']\n self.__add_callback(sub_id, callback)\n if self.wait_for_subscription_confirm:\n timeout = datetime.datetime.utcnow() + datetime.timedelta(\n seconds=self.wait_for_subscription_confirm_timeout)\n while timeout >= datetime.datetime.utcnow():\n if self.__callbacks[sub_id]['active']:\n return sub_id\n asyncio.get_event_loop().run_until_complete(asyncio.sleep(0.01))\n self.__callbacks.pop(sub_id, None)\n raise EventSubSubscriptionTimeout()\n return sub_id", "def subscribePost() -> object:\n log = logging.getLogger(__name__)\n db = Db()\n\n body = request.get_json()\n\n if body is None:\n return jsonify({\"error\": \"json body is required\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n if not('datasetId') in body:\n return jsonify({\"error\": \"datasetId is a required attribute\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n if not('notificationUrl') in body:\n return jsonify({\"error\": \"notificationUrl is a required attribute\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n\n subscription = db.Subscriptions(\n 
datasetId=body['datasetId'],\n notificationUrl=body['notificationUrl']\n )\n\n subscription.save()\n\n subscription = json.loads(subscription.to_json())\n subscription['id'] = subscription['_id'][\"$oid\"]\n subscription.pop(\"_id\")\n log.debug(\"subscription created\")\n\n return jsonify(subscription), HTTPStatus.CREATED", "def subscription(self, subscription):\n if subscription is None:\n raise ValueError(\"Invalid value for `subscription`, must not be `None`\")\n\n self._subscription = subscription", "def pause_subscription(self):\n res = self._pause_subscription()\n self._subscribed = not self.subscribed\n return res", "def test_list_pending_template_subscriptions(self):\n pass", "def subscribe(receiver, updateInterval=10):", "def getsubscriptions(self):\n subs = {}\n for sub in self._subscriptions.values():\n subs[sub.ID] = sub.asTuple()\n return subs", "def get_subscriptions_to_self(self):\n return self._roster.get_my_subscribers()\n return self._roster.get_my_subscribers()", "def test_existing_subscriptions_autosubscription(self) -> None:\n stream_name = \"new_public_stream\"\n cordelia = self.example_user(\"cordelia\")\n self.common_subscribe_to_streams(cordelia, [stream_name], invite_only=False)\n result = self.client_post(\n \"/json/subscriptions/exists\", {\"stream\": stream_name, \"autosubscribe\": \"false\"}\n )\n response_dict = self.assert_json_success(result)\n self.assertIn(\"subscribed\", response_dict)\n self.assertFalse(response_dict[\"subscribed\"])\n\n result = self.client_post(\n \"/json/subscriptions/exists\", {\"stream\": stream_name, \"autosubscribe\": \"true\"}\n )\n response_dict = self.assert_json_success(result)\n self.assertIn(\"subscribed\", response_dict)\n self.assertTrue(response_dict)" ]
[ "0.6767392", "0.672459", "0.6712795", "0.66163754", "0.659562", "0.6585582", "0.6523138", "0.6443233", "0.64327395", "0.63420546", "0.6317172", "0.6317172", "0.6311791", "0.6272946", "0.62651706", "0.623661", "0.6234481", "0.62003255", "0.61978865", "0.61763334", "0.6157788", "0.61439747", "0.61425656", "0.611801", "0.60908514", "0.6046276", "0.6038657", "0.6038657", "0.6009132", "0.6007158", "0.6003454", "0.5973642", "0.597068", "0.59394664", "0.59223974", "0.58971477", "0.588576", "0.58791244", "0.58758944", "0.5865233", "0.58518696", "0.5842229", "0.58422184", "0.5841435", "0.58288974", "0.5820687", "0.58134615", "0.579945", "0.5791124", "0.5791124", "0.5791124", "0.57893735", "0.5767502", "0.5760433", "0.575773", "0.5749892", "0.5745516", "0.57154554", "0.56971544", "0.56968284", "0.56943005", "0.5684036", "0.5661033", "0.5660045", "0.5640525", "0.5619953", "0.5611931", "0.5611749", "0.5599006", "0.55963916", "0.5596389", "0.55936116", "0.55907357", "0.5586081", "0.5585373", "0.5577829", "0.5570555", "0.5562918", "0.55378914", "0.5512908", "0.5509801", "0.5498297", "0.54666364", "0.5457887", "0.5454401", "0.5451905", "0.54399776", "0.5438099", "0.5433395", "0.54212356", "0.5406004", "0.54027253", "0.539961", "0.53986406", "0.53941923", "0.53938574", "0.5387973", "0.53744996", "0.53727114", "0.537074", "0.53703344" ]
0.0
-1
This operation applies only to replica set instances and standalone instances. It is not supported for sharded cluster instances.
def describe_replica_set_role_with_options( self, request: dds_20151201_models.DescribeReplicaSetRoleRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.DescribeReplicaSetRoleResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='DescribeReplicaSetRole', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.DescribeReplicaSetRoleResponse(), self.call_api(params, req, runtime) )
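To put the wrapper above in context, here is a minimal, hedged usage sketch. Everything not present in the method itself is an assumption for illustration: the package import paths, the client class name, the endpoint format, and the credential and instance-ID placeholders. The request model, the `RuntimeOptions` object, and the call to `describe_replica_set_role_with_options` follow directly from the signature shown in the document field above.

```python
# Hedged usage sketch (not part of the dataset row above). Import paths, the
# client class name, the endpoint, and all placeholders are assumptions; the
# request model, runtime options, and method name come from the wrapper itself.
from alibabacloud_dds20151201.client import Client as DdsClient      # assumed package path
from alibabacloud_dds20151201 import models as dds_20151201_models   # assumed package path
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util import models as util_models

# Build a client. Credentials, region, and endpoint are placeholders.
config = open_api_models.Config(
    access_key_id='<your-access-key-id>',
    access_key_secret='<your-access-key-secret>',
    region_id='cn-hangzhou',
)
config.endpoint = 'mongodb.cn-hangzhou.aliyuncs.com'  # assumed endpoint format

client = DdsClient(config)

# DescribeReplicaSetRole works for replica set and standalone instances only;
# sharded cluster instances are not supported by this operation.
request = dds_20151201_models.DescribeReplicaSetRoleRequest(
    dbinstance_id='dds-bp1xxxxxxxxxxxxx',  # placeholder instance ID
)
runtime = util_models.RuntimeOptions()

response = client.describe_replica_set_role_with_options(request, runtime)
print(response.body)
```

Calling this against a sharded cluster instance is expected to fail, consistent with the applicability note in the query field above.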
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def replicaof(self, *args, **kwargs) -> NoReturn:\n raise RedisClusterException(\"REPLICAOF is not supported in cluster mode\")", "def modifyMastersWithMultipleInstances(self):\n # Nothing to do\n pass", "def test_patch_cluster_role(self):\n pass", "def cluster(self):\n assert False", "def replica_set_name(self):\n ...", "def test_redis_increase_replica_count_usual_case():", "def test_replace_cluster_role(self):\n pass", "def is_replica(self):\n if (\n self.replica\n and self.aip\n and not self.deleted\n and not self.sip\n and not self.dip\n ):\n return True\n return False", "def test_patch_hyperflex_cluster(self):\n pass", "def test_patch_cluster_policy(self):\n pass", "def test_replace_cluster_policy(self):\n pass", "def mmo_execute_on_secondary_or_primary(self, mmo_connection, command, replicaset=\"all\", first_available_only=False): # TODO add execution database?\n cluster_command_output = []\n replsets_completed = set()\n all_replsets = set()\n repl_hosts = self.mmo_shard_servers(mmo_connection)\n for doc in repl_hosts:\n hostname, port, shard = doc[\"hostname\"], doc[\"port\"], doc[\"shard\"]\n all_replsets.add(shard)\n auth_dic = self.mmo_get_auth_details_from_connection(mmo_connection)\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"],\n auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"SECONDARY\" \\\n and (replicaset == \"all\" or replicaset == shard) \\\n and shard not in replsets_completed:\n secondary_found = True\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append(\n {\"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": command_output})\n #if first_available_only:\n replsets_completed.add(shard)\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": shard,\n \"command_output\": {\"Error\": \"This mongod process is not available\"}})\n else:\n raise excep\n # This is the case where there are no secondaries\n for missing_shard in (all_replsets^replsets_completed):\n for doc in repl_hosts:\n hostname, port, shard = doc[\"hostname\"], doc[\"port\"], doc[\"shard\"]\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"],\n auth_dic[\"authentication_database\"])\n if shard == missing_shard and self.mmo_replica_state(c)[\"name\"] == \"PRIMARY\":\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": command_output})\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": shard,\n \"command_output\": {\"Error\": \"This mongod process is not available\"}})\n else:\n raise excep\n return cluster_command_output", "def prepare_replica_for_exchange(self, replica):\n pass", "def test_base_replica_repair_with_contention(self):\n self._base_replica_repair_test(fail_mv_lock=True)", "def dvs_instances_batch_mix_sg(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n self.show_step(2)\n net_1 = 
os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network is created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(3)\n self.show_step(4)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n self.icmp[\"security_group_rule\"][\"security_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"remote_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n self.icmp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n\n for sg in [sg1, sg2]:\n self.tcp[\"security_group_rule\"][\"security_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"remote_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n self.tcp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n\n # Add rules for ssh and ping\n os_conn.goodbye_security()\n _s_groups = os_conn.neutron.list_security_groups()['security_groups']\n _srv_tenant_id = os_conn.get_tenant(SERVTEST_TENANT).id\n default_sg = [sg for sg in _s_groups\n if sg['tenant_id'] == _srv_tenant_id and\n sg['name'] == 'default'][0]\n\n for step in (5, 9):\n self.show_step(step) # step 5, 9\n step += 1\n self.show_step(step) # step 6, 10\n step += 1\n\n openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=vm_count,\n security_groups=[sg1.name, sg2.name, default_sg['name']])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(step) # step 7, 11\n step += 1\n\n instances = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, instances)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair,\n timeout=60 * 5,\n interval=10)\n\n self.show_step(step) # step 8, 12\n step += 1\n\n for instance in instances:\n os_conn.nova.servers.delete(instance)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(instance)", "def crash_safe_replication(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"crash_safe_replication\")", "def test_update_hyperflex_cluster(self):\n pass", "def test_snat_with_master_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n 
self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def scale_app(self, name, replicas):\n raise NotImplementedError", "def mmo_execute_on_secondaries(self, mmo_connection, command, replicaset=\"all\", first_available_only=False): # TODO add execution database?\n cluster_command_output = []\n replsets_completed = []\n\n auth_dic = self.mmo_get_auth_details_from_connection(mmo_connection)\n\n if replicaset == self.config_server_repl_name: # Config server replset\n for doc in self.mmo_config_servers(mmo_connection):\n hostname, port = doc[\"hostname\"], doc[\"port\"]\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"],auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"SECONDARY\":\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": command_output})\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": {\"Error\": \"mongod process is not up\"}})\n else:\n raise excep\n else:\n for doc in self.mmo_shard_servers(mmo_connection):\n hostname, port, shard = doc[\"hostname\"], doc[\"port\"], doc[\"shard\"]\n try:\n\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"], auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"SECONDARY\" \\\n and (replicaset == \"all\" or replicaset == shard)\\\n and shard not in replsets_completed:\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({ \"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": command_output })\n if first_available_only:\n replsets_completed.append(shard)\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": {\"Error\": \"This mongod process is not available\"}})\n else:\n raise excep\n return cluster_command_output", "def test_replace_cluster_resource_quota_status(self):\n pass", "def test_replace_cluster_resource_quota(self):\n pass", "def test_read_cluster_role(self):\n pass", "def test_patch_cluster_resource_quota(self):\n pass", "def replica_catalog(train_tweets_path, val_tweets_path, test_tweets_path, dataset_images, EMBEDDING_BASE_PATH):\n rc = ReplicaCatalog()\n http_location = \"https://workflow.isi.edu/Panorama/Data/CrisisComputing\"\n\n # list of input file objects\n input_images = []\n\n # Adding Images to the replica catalogue\n for image_path in dataset_images:\n name = image_path[1].split(\"/\")[-1]\n image_file = File(name)\n input_images.append(image_file)\n \n path_split = image_path[0].split(\"/\")\n #rc.add_replica(\"local\", image_file, image_path[0])\n rc.add_replica(\"isi\", image_file, os.path.join(http_location, path_split[-2], path_split[-1]))\n\n \n glove_embeddings = File('glove.twitter.27B.200d.txt')\n \n # File objects for train, val and test tweets csv\n train_tweets_name = File(train_tweets_path.split('/')[-1])\n val_tweets_name = File(val_tweets_path.split('/')[-1])\n test_tweets_name = File(test_tweets_path.split('/')[-1])\n \n #rc.add_replica(\"local\", train_tweets_name, train_tweets_path)\n #rc.add_replica(\"local\", val_tweets_name, val_tweets_path)\n 
#rc.add_replica(\"local\", test_tweets_name, test_tweets_path)\n #rc.add_replica(\"local\", glove_embeddings, os.path.join(os.getcwd(), os.path.join(EMBEDDING_BASE_PATH, GLOVE_EMBEDDING_FILE))) \n \n path_split = train_tweets_path.split('/')\n rc.add_replica(\"isi\", train_tweets_name, os.path.join(http_location, path_split[-2], path_split[-1]))\n path_split = val_tweets_path.split('/')\n rc.add_replica(\"isi\", val_tweets_name, os.path.join(http_location, path_split[-2], path_split[-1]))\n path_split = test_tweets_path.split('/')\n rc.add_replica(\"isi\", test_tweets_name, os.path.join(http_location, path_split[-2], path_split[-1]))\n \n rc.add_replica(\"isi\", glove_embeddings, os.path.join(http_location, \"glove_twitter\", GLOVE_EMBEDDING_FILE)) \n rc.write()\n\n return input_images, train_tweets_name, val_tweets_name, test_tweets_name, glove_embeddings", "def mmo_execute_on_primaries(self, mmo_connection, command, replicaset=\"all\"): # TODO add execution database?\n\n auth_dic = self.mmo_get_auth_details_from_connection(mmo_connection)\n\n cluster_command_output = []\n if replicaset == self.config_server_repl_name: # Config server replset\n for doc in self.mmo_config_servers(mmo_connection):\n hostname, port = doc[\"hostname\"], doc[\"port\"]\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"],auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"PRIMARY\":\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": command_output})\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": {\"Error\": \"mongod process is not up\"}})\n else:\n raise excep\n else:\n for doc in self.mmo_shard_servers(mmo_connection):\n hostname, port, shard = doc[\"hostname\"], doc[\"port\"], doc[\"shard\"]\n\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"], auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"PRIMARY\" and (replicaset == \"all\" or replicaset == shard):\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({ \"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": command_output })\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({ \"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": { \"Error\": \"mongod process is not up\" } })\n else:\n raise excep\n return cluster_command_output", "def become_replica(ticket, initiated_by):\n assert model.is_standalone()\n\n # On dev appserver emulate X-Appengine-Inbound-Appid header.\n headers = {'Content-Type': 'application/octet-stream'}\n protocol = 'https'\n if utils.is_local_dev_server():\n headers['X-Appengine-Inbound-Appid'] = app_identity.get_application_id()\n protocol = 'http'\n headers['X-URLFetch-Service-Id'] = utils.get_urlfetch_service_id()\n\n # Pass back the ticket for primary to verify it, tell the primary to use\n # default version hostname to talk to us.\n link_request = replication_pb2.ServiceLinkRequest()\n link_request.ticket = ticket.ticket\n link_request.replica_url = (\n '%s://%s' % (protocol, app_identity.get_default_version_hostname()))\n link_request.initiated_by = initiated_by.to_bytes()\n\n # 
Primary will look at X-Appengine-Inbound-Appid and compare it to what's in\n # the ticket.\n try:\n result = urlfetch.fetch(\n url='%s/auth_service/api/v1/internal/link_replica' % ticket.primary_url,\n payload=link_request.SerializeToString(),\n method='POST',\n headers=headers,\n follow_redirects=False,\n deadline=30,\n validate_certificate=True)\n except urlfetch.Error as exc:\n raise ProtocolError(\n replication_pb2.ServiceLinkResponse.TRANSPORT_ERROR,\n 'URLFetch error (%s): %s' % (exc.__class__.__name__, exc))\n\n # Protobuf based protocol is not using HTTP codes (handler always replies with\n # HTTP 200, providing error details if needed in protobuf serialized body).\n # So any other status code here means there was a transport level error.\n if result.status_code != 200:\n raise ProtocolError(\n replication_pb2.ServiceLinkResponse.TRANSPORT_ERROR,\n 'Request to the primary failed with HTTP %d.' % result.status_code)\n\n link_response = replication_pb2.ServiceLinkResponse.FromString(result.content)\n if link_response.status != replication_pb2.ServiceLinkResponse.SUCCESS:\n message = LINKING_ERRORS.get(\n link_response.status,\n 'Request to the primary failed with status %d.' % link_response.status)\n raise ProtocolError(link_response.status, message)\n\n # Become replica. Auth DB will be overwritten on a first push from Primary.\n state = model.AuthReplicationState(\n key=model.replication_state_key(),\n primary_id=ticket.primary_id,\n primary_url=ticket.primary_url)\n state.put()", "def failover_replica(self) -> 'outputs.InstanceFailoverReplicaResponse':\n return pulumi.get(self, \"failover_replica\")", "def test_patch_cluster_resource_quota_status(self):\n pass", "def enable_replicate(self, req, id, body):\n LOG.info(_LI(\"Enable volume's replicate, volume_id: %s\"), id)\n context = req.environ['sgservice.context']\n volume = self.service_api.get(context, id)\n replicate_info = self.service_api.enable_replicate(context, volume)\n return self._view_builder.action_summary(req, replicate_info)", "def dvs_instances_one_group(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(2)\n self.show_step(3)\n openstack.create_instances(os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n vm_count=vm_count,\n security_groups=[security_group.name])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(4)\n srv_list = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, srv_list)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair)\n\n self.show_step(5)\n for srv in srv_list:\n os_conn.nova.servers.delete(srv)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(srv)", "def test_read_cluster_policy(self):\n pass", "def is_replicated():\n if tf.distribute.has_strategy() and 
tf.distribute.get_replica_context():\n return tf.distribute.get_strategy().num_replicas_in_sync > 1\n return get_tf_replicator() is not None or is_tpu_replicated()", "def test_create_cluster_role(self):\n pass", "def test_index_nas_shares_by_pool(self):\n pass", "def delete_cluster(self):", "def check_smartstack_replication_for_instance(\n instance_config,\n expected_count,\n smartstack_replication_checker,\n):\n\n crit_threshold = instance_config.get_replication_crit_percentage()\n\n log.info('Checking instance %s in smartstack', instance_config.job_id)\n smartstack_replication_info = \\\n smartstack_replication_checker.get_replication_for_instance(instance_config)\n\n log.debug('Got smartstack replication info for %s: %s' %\n (instance_config.job_id, smartstack_replication_info))\n\n if len(smartstack_replication_info) == 0:\n status = pysensu_yelp.Status.CRITICAL\n output = (\n 'Service %s has no Smartstack replication info. Make sure the discover key in your smartstack.yaml '\n 'is valid!\\n'\n ) % instance_config.job_id\n log.error(output)\n else:\n expected_count_per_location = int(expected_count / len(smartstack_replication_info))\n output = ''\n output_critical = ''\n output_ok = ''\n under_replication_per_location = []\n\n for location, available_backends in sorted(smartstack_replication_info.items()):\n num_available_in_location = available_backends.get(instance_config.job_id, 0)\n under_replicated, ratio = is_under_replicated(\n num_available_in_location, expected_count_per_location, crit_threshold,\n )\n if under_replicated:\n output_critical += '- Service %s has %d out of %d expected instances in %s (CRITICAL: %d%%)\\n' % (\n instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,\n )\n else:\n output_ok += '- Service %s has %d out of %d expected instances in %s (OK: %d%%)\\n' % (\n instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,\n )\n under_replication_per_location.append(under_replicated)\n\n output += output_critical\n if output_critical and output_ok:\n output += '\\n\\n'\n output += 'The following locations are OK:\\n'\n output += output_ok\n\n if any(under_replication_per_location):\n status = pysensu_yelp.Status.CRITICAL\n output += (\n \"\\n\\n\"\n \"What this alert means:\\n\"\n \"\\n\"\n \" This replication alert means that a SmartStack powered loadbalancer (haproxy)\\n\"\n \" doesn't have enough healthy backends. Not having enough healthy backends\\n\"\n \" means that clients of that service will get 503s (http) or connection refused\\n\"\n \" (tcp) when trying to connect to it.\\n\"\n \"\\n\"\n \"Reasons this might be happening:\\n\"\n \"\\n\"\n \" The service may simply not have enough copies or it could simply be\\n\"\n \" unhealthy in that location. There also may not be enough resources\\n\"\n \" in the cluster to support the requested instance count.\\n\"\n \"\\n\"\n \"Things you can do:\\n\"\n \"\\n\"\n \" * You can view the logs for the job with:\\n\"\n \" paasta logs -s %(service)s -i %(instance)s -c %(cluster)s\\n\"\n \"\\n\"\n \" * Fix the cause of the unhealthy service. 
Try running:\\n\"\n \"\\n\"\n \" paasta status -s %(service)s -i %(instance)s -c %(cluster)s -vv\\n\"\n \"\\n\"\n \" * Widen SmartStack discovery settings\\n\"\n \" * Increase the instance count\\n\"\n \"\\n\"\n ) % {\n 'service': instance_config.service,\n 'instance': instance_config.instance,\n 'cluster': instance_config.cluster,\n }\n log.error(output)\n else:\n status = pysensu_yelp.Status.OK\n log.info(output)\n send_event(instance_config=instance_config, status=status, output=output)", "def run(self):\n client = self._get_client()\n metadata = self._metadata\n\n if str(metadata.get('cluster')) == str(self._cluster_id):\n if str(self._uuid) == str(self._node_id):\n #hypervisor_hostname = client.servers.find(\n # id=self._uuid).to_dict()['OS-EXT-SRV-ATTR:hypervisor_hostname']\n hypervisor_hostname = client.servers.find(\n id=self._uuid).to_dict()['OS-EXT-SRV-ATTR:host']\n data = {\n 'migrate': True,\n 'uuid': self._uuid,\n 'hypervisor_hostname': hypervisor_hostname,\n 'flavor_id': self._flavor\n }\n return Result(data)\n\n data = {\n 'migrate': False,\n 'uuid': self._uuid,\n 'hypervisor_hostname': '',\n 'flavor_id':self._flavor\n }\n return Result(data)", "def replica(self) -> str:\n return pulumi.get(self, \"replica\")", "def _same_instance(client1, client2):\n return client1._topology_settings.seeds == client2._topology_settings.seeds", "def use_read_replica_if_available(queryset):\r\n return queryset.using(\"read_replica\") if \"read_replica\" in settings.DATABASES else queryset", "def test_delete_collection_cluster_policy(self):\n pass", "def upsert(self):\n\n if self.cluster:\n self.cluster.upsert()\n else:\n super().upsert()", "def test_update_nas_share_by_pool(self):\n pass", "def cluster_replicate(\n self, target_nodes: \"TargetNodesT\", node_id: str\n ) -> ResponseT:\n return self.execute_command(\n \"CLUSTER REPLICATE\", node_id, target_nodes=target_nodes\n )", "def test_snat_with_nodes_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n for node in self.inputs.k8s_slave_ips:\n self.inputs.reboot(node)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def test_patch_cluster_role_binding(self):\n pass", "def replication(self):\n return self._replication", "def crash_safe_replication_enabled(self) -> bool:\n return pulumi.get(self, \"crash_safe_replication_enabled\")", "def test_patch_cluster_network(self):\n pass", "def test_rollback(self):\n os.system('rm config.txt; touch config.txt')\n test_oplog, primary_conn, mongos, solr = self.get_new_oplog()\n\n if not start_cluster():\n self.fail('Cluster could not be started successfully!')\n\n solr = DocManager()\n test_oplog.doc_manager = solr\n solr._delete() # equivalent to solr.delete(q='*: *')\n\n mongos['test']['test'].remove({})\n mongos['test']['test'].insert( \n {'_id': ObjectId('4ff74db3f646462b38000001'),\n 'name': 'paulie'},\n safe=True\n )\n while (mongos['test']['test'].find().count() != 1):\n time.sleep(1)\n cutoff_ts = test_oplog.get_last_oplog_timestamp()\n\n first_doc = {'name': 'paulie', '_ts': bson_ts_to_long(cutoff_ts),\n 'ns': 'test.test',\n '_id': ObjectId('4ff74db3f646462b38000001')}\n\n #try kill one, try restarting\n kill_mongo_proc(primary_conn.host, PORTS_ONE['PRIMARY'])\n\n new_primary_conn 
= Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))\n admin = new_primary_conn['admin']\n while admin.command(\"isMaster\")['ismaster'] is False:\n time.sleep(1)\n time.sleep(5)\n count = 0\n while True:\n try:\n mongos['test']['test'].insert({\n '_id': ObjectId('4ff74db3f646462b38000002'),\n 'name': 'paul'}, \n safe=True)\n break\n except OperationFailure:\n count += 1\n if count > 60:\n self.fail('Call to insert doc failed too many times')\n time.sleep(1)\n continue\n while (mongos['test']['test'].find().count() != 2):\n time.sleep(1)\n kill_mongo_proc(primary_conn.host, PORTS_ONE['SECONDARY'])\n start_mongo_proc(PORTS_ONE['PRIMARY'], \"demo-repl\", \"/replset1a\",\n \"/replset1a.log\", None)\n\n #wait for master to be established\n while primary_conn['admin'].command(\"isMaster\")['ismaster'] is False:\n time.sleep(1)\n\n start_mongo_proc(PORTS_ONE['SECONDARY'], \"demo-repl\", \"/replset1b\",\n \"/replset1b.log\", None)\n\n #wait for secondary to be established\n admin = new_primary_conn['admin']\n while admin.command(\"replSetGetStatus\")['myState'] != 2:\n time.sleep(1)\n while retry_until_ok(mongos['test']['test'].find().count) != 1:\n time.sleep(1)\n\n self.assertEqual(str(new_primary_conn.port), PORTS_ONE['SECONDARY'])\n self.assertEqual(str(primary_conn.port), PORTS_ONE['PRIMARY'])\n\n last_ts = test_oplog.get_last_oplog_timestamp()\n second_doc = {'name': 'paul', '_ts': bson_ts_to_long(last_ts),\n 'ns': 'test.test', \n '_id': ObjectId('4ff74db3f646462b38000002')}\n\n test_oplog.doc_manager.upsert(first_doc)\n test_oplog.doc_manager.upsert(second_doc)\n\n test_oplog.rollback()\n test_oplog.doc_manager.commit()\n results = solr._search()\n\n assert(len(results) == 1)\n\n self.assertEqual(results[0]['name'], 'paulie')\n self.assertTrue(results[0]['_ts'] <= bson_ts_to_long(cutoff_ts))\n\n #test_oplog.join()", "def replicas(self, replicas):\n\n self._replicas = replicas", "def disable_replicate(self, req, id, body):\n LOG.info(_LI(\"Disable volume's replicate, volume_id: %s\"), id)\n context = req.environ['sgservice.context']\n volume = self.service_api.get(context, id)\n replicate_info = self.service_api.disable_replicate(context, volume)\n return self._view_builder.action_summary(req, replicate_info)", "def check_replica_primary(con,host, warning, critical,perf_data):\n if warning is None and critical is None:\n warning=1\n warning=warning or 2\n critical=critical or 2\n\n primary_status=0\n message=\"Primary server has not changed\"\n db=con[\"nagios\"]\n data=get_server_status(con)\n current_primary=data['repl'].get('primary')\n saved_primary=get_stored_primary_server_name(db)\n if current_primary is None:\n current_primary = \"None\"\n if saved_primary is None:\n saved_primary = \"None\"\n if current_primary != saved_primary:\n last_primary_server_record = {\"server\": current_primary}\n db.last_primary_server.update({\"_id\": \"last_primary\"}, {\"$set\" : last_primary_server_record} , upsert=True, safe=True)\n message = \"Primary server has changed from %s to %s\" % (saved_primary, current_primary)\n primary_status=1\n return check_levels(primary_status,warning,critical,message)", "def start_wsrep_new_cluster(self):\r\n return execute(self._start_wsrep_new_cluster)", "def get_instance_OLD (self):\n instances = self.data['instances']\n if not len(instances) == 1:\n raise Exception, \"ArchivalObject: %d Instances found\" % len(instances)\n return instances[0]", "def test_assign_configuration_to_valid_instance(self):\n print(\"instance_info.id: %s\" % instance_info.id)\n 
print(\"configuration_info: %s\" % configuration_info)\n print(\"configuration_info.id: %s\" % configuration_info.id)\n config_id = configuration_info.id\n instance_info.dbaas.instances.modify(instance_info.id,\n configuration=config_id)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)", "def test_snat_with_docker_restart_on_master(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"docker\",\n host_ips = [self.inputs.k8s_master_ip])\n time.sleep(30)\n cluster_status, error_nodes = ContrailStatusChecker(self.inputs).wait_till_contrail_cluster_stable()\n assert cluster_status, 'All nodes and services not up. Failure nodes are: %s' % (\n error_nodes)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def test_replace_cluster_network(self):\n pass", "def failover_replica(self) -> pulumi.Output['outputs.InstanceFailoverReplicaResponse']:\n return pulumi.get(self, \"failover_replica\")", "def test_create_cluster_policy(self):\n pass", "def test_patch_hyperflex_cluster_storage_policy(self):\n pass", "def set_cluster_autoscaler(enabled, worker_pool_names=None, new_worker_pool_names=None):\n modified_pools = []\n if k8s.exists('configmap', 'kube-system', 'iks-ca-configmap'):\n config_map = k8s.get('configmap', 'kube-system', 'iks-ca-configmap')\n worker_pools_config = json.loads(config_map['data']['workerPoolsConfig.json'])\n rename_worker_pools = new_worker_pool_names and worker_pool_names and len(new_worker_pool_names) == len(worker_pool_names)\n for pool_config in worker_pools_config:\n if not worker_pool_names or pool_config['name'] in worker_pool_names:\n if rename_worker_pools:\n pool_config['name'] = new_worker_pool_names[worker_pool_names.index(pool_config['name'])]\n pool_config['enabled'] = enabled\n modified_pools.append(pool_config['name'])\n elif pool_config['enabled'] != enabled:\n pool_config['enabled'] = enabled\n modified_pools.append(pool_config['name'])\n if modified_pools:\n config_map['data']['workerPoolsConfig.json'] = json.dumps(worker_pools_config, ensure_ascii=False) # TODO: Remove ensure_ascii when migration to py3 is complete\n k8s.apply(config_map)\n else:\n logger.info('Cluster autoscaler is not present')\n return modified_pools", "def install_mongo ( vpc_conn, ec2_conn, cloudwatch_conn, vpc, base_name, aws_account_type, base_topicarn, params ):\n app_type = 'MONGO'\n mongo_keypair = get_keypair_name( aws_account_type, vpc.region.name, app_type )\n print mongo_keypair\n mongo_sec_grp_name = get_secgrp_name( base_name, app_type )\n \n print \"mongo_sec_grp_name\" + mongo_sec_grp_name\n \n mongo_sec_grp = find_secgrp(ec2_conn, mongo_sec_grp_name)\n print mongo_sec_grp\n \n if not mongo_sec_grp :\n mongo_sec_grp = create_secgrp( ec2_conn,\n vpc,\n mongo_sec_grp_name,\n 'Controls access to the ' + app_type )\n print mongo_sec_grp \n \n remove_all_rules( ec2_conn, [ mongo_sec_grp ],deep = True,base_name = base_name)\n grant_ssh_access( ec2_conn, [ mongo_sec_grp ], find_group( ec2_conn, base_name, 'NAT' ) )\n grant_grp_self_access ( ec2_conn, mongo_sec_grp, 0, 65535, protocol = 'tcp' )\n \n mongo_ami_name = base_name + '-' + app_type\n \n mongo_ami = get_ami_by_name( ec2_conn, mongo_ami_name )\n if not mongo_ami :\n mongo_ami = 
get_ami_by_name( ec2_conn, default_mongo_ami )\n\n print \"Creating mongoDB Instances\"\n \n mongo_subnets = get_vpc_subnets( vpc_conn, vpc, 'STATIC' )\n mongo_sec_grps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ mongo_sec_grp_name ] } )\n \n primary_ip = get_primary_node(mongo_subnets)\n mongo_instances = []\n mongo_config = get_mongo_config(mongo_subnets)\n \n user_data = None\n primary_instance_id = None\n primary_subnet = None\n for subnet in mongo_subnets :\n cidr_block = subnet.cidr_block\n \n if cidr_block == primary_ip :\n primary_subnet = subnet\n \n # First launch the secondary instances\n if cidr_block != primary_ip :\n instance_private_ip = get_static_ip(subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo )\n\n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )\n\n user_data = base64.b64encode(mongo_config)\n \n print \"Launching primary mongo instance\"\n print \"primary_subnet \" +primary_subnet.cidr_block\n instance_private_ip = get_static_ip(primary_subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = primary_subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo)\n \n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )", "def replace_with_insufficient_replicas_test(self):\n debug(\"Starting cluster with 3 nodes.\")\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n if DISABLE_VNODES:\n num_tokens = 1\n else:\n # a little hacky but grep_log returns the whole line...\n num_tokens = int(node3.get_conf_option('num_tokens'))\n\n debug(\"testing with num_tokens: {}\".format(num_tokens))\n\n debug(\"Inserting Data...\")\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=2)'])\n\n # stop node to replace\n debug(\"Stopping node to replace.\")\n node3.stop(wait_other_notice=True)\n\n # stop other replica\n debug(\"Stopping node2 (other replica)\")\n node2.stop(wait_other_notice=True)\n\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n cluster.add(node4, False)\n node4.start(replace_address='127.0.0.3', wait_for_binary_proto=False, wait_other_notice=False)\n\n # replace should fail due to insufficient replicas\n node4.watch_log_for(\"Unable to find sufficient sources for streaming range\")\n assert_not_running(node4)", "def 
test_non_additive_instance_creation(self):\n\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import launch_instances, run_block_device_dict, farm_security_group_setup\n\n # launch_instances requires vpc setup as done by firesim/scripts/setup_firesim.py\n from awstools.aws_setup import aws_setup\n aws_setup()\n farm_security_group_setup()\n\n type = 'f1.2xlarge'\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags = {'fsimcluster': 'testcluster'},\n always_expand=False)\n instances.should.have.length_of(1)\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags = {'fsimcluster': 'testcluster'},\n always_expand=False)\n instances.should.have.length_of(1)\n\n # There should be one instance total now, across one reservation\n ec2_client = boto3.client('ec2')\n paginator = ec2_client.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n all_reservations.should.have.length_of(1)\n\n [i for r in all_reservations for i in r['Instances']].should.have.length_of(1)", "def test_delete_cluster_role(self):\n pass", "def test_snat_with_kubelet_restart_on_master(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"kubelet\",\n host_ips = [self.inputs.k8s_master_ip])\n time.sleep(30) # Wait timer for all kubernetes pods to stablise.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def index(self):\n\n if self.cluster:\n self.cluster.index()\n else:\n super().index()", "def _mix_replicas(self):\n logger.debug(\"Mixing replicas (does nothing for MultiStateSampler)...\")\n\n # Reset storage to keep track of swap attempts this iteration.\n self._n_accepted_matrix[:, :] = 0\n self._n_proposed_matrix[:, :] = 0\n\n # Determine fraction of swaps accepted this iteration.\n n_swaps_proposed = self._n_proposed_matrix.sum()\n n_swaps_accepted = self._n_accepted_matrix.sum()\n swap_fraction_accepted = 0.0\n if n_swaps_proposed > 0:\n swap_fraction_accepted = n_swaps_accepted / n_swaps_proposed # Python 3 uses true division for /\n logger.debug(\"Accepted {}/{} attempted swaps ({:.1f}%)\".format(n_swaps_accepted, n_swaps_proposed,\n swap_fraction_accepted * 100.0))\n return self._replica_thermodynamic_states", "def convert_container_to_replica(\n self,\n replica_name: str,\n active_container: docker.models.containers.Container,\n passive_container: docker.models.containers.Container) -> list[docker.models.images.Image]:\n new_replica_name = self.sanitize_replica_name(replica_name)\n replica_list = []\n container_list = [\n active_container, passive_container] if passive_container else [active_container]\n\n logger.info(\n f'Creating new replica image with name {new_replica_name}...')\n\n for container in container_list:\n try:\n self.client.images.remove(new_replica_name, force=True)\n except docker.errors.ImageNotFound:\n 
pass\n\n container_arch = container.name.split('_')[-1]\n\n # commit with arch tag\n replica = container.commit(\n repository=new_replica_name, tag=container_arch)\n replica_list.append(replica)\n\n logger.info(\n f'Replica image {replica.tags[0]} created. Cleaning up...')\n self.remove_container(container.name)\n\n for replica in replica_list:\n if replica.attrs.get('Architecture') == LOCAL_ARCHITECTURE:\n local_arch_replica = replica\n local_arch_replica.tag(\n repository=new_replica_name, tag='latest')\n\n # this is done due to how recomitting existing image is not reflected in 'replica_list' var\n actual_replica_list = self.client.images.list(new_replica_name)\n\n return actual_replica_list", "def test_list_cluster_policy(self):\n pass", "def modify_rds_security_group(payload):\n # version = payload.get(\"version\")\n rds_ids = payload.pop(\"resource_id\")\n sg_ids = payload.pop(\"sg_id\")\n apply_action = \"GrantSecurityGroup\"\n remove_action = \"RemoveSecurityGroup\"\n check_instance_security_action = \"DescribeSecurityGroupByInstance\"\n version = payload.get(\"version\")\n result_data = {}\n\n succ, resp = get_ha_rds_backend_instance_info(payload)\n if not succ:\n return resp\n rds_2_instance = {rds: instance for instance, rds in resp.items()}\n\n if len(sg_ids) > 1:\n return console_response(\n SecurityErrorCode.ONE_SECURITY_PER_INSTANCE_ERROR, \"modify failed\")\n sg_id = sg_ids[0]\n\n code = 0\n msg = 'Success'\n for rds_id in rds_ids:\n sg_results_succ = []\n sg = RdsSecurityGroupModel.get_security_by_id(sg_id=sg_id)\n sg_uuid = sg.uuid\n visible_rds_record = RdsModel.get_rds_by_id(rds_id=rds_id)\n if visible_rds_record.rds_type == 'ha':\n rds_group = visible_rds_record.rds_group\n rds_records = RdsModel.get_rds_records_by_group(rds_group)\n else:\n rds_records = []\n for rds_record in rds_records:\n rds_ins_uuid = rds_2_instance.get(rds_record.uuid)\n\n payload.update(\n {\"action\": check_instance_security_action, \"version\": version,\n \"server\": rds_ins_uuid})\n # check_resp = api.get(payload=payload, timeout=10)\n check_resp = api.get(payload=payload)\n if check_resp.get(\"code\") != 0:\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = check_resp.get(\"msg\")\n continue\n\n # if the instance already has a security group, remove it\n if check_resp[\"data\"][\"total_count\"] > 0:\n old_sg_uuid = check_resp[\"data\"][\"ret_set\"][0][\"id\"]\n payload.update({\"action\": remove_action, \"version\": version,\n \"server\": rds_ins_uuid,\n \"security_group\": old_sg_uuid})\n # remove_resp = api.get(payload=payload, timeout=10)\n remove_resp = api.get(payload=payload)\n if remove_resp.get(\"code\") != 0:\n logger.debug(\"the resp of removing the old securty group is:\" +\n str(remove_resp))\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = remove_resp.get(\"msg\")\n continue\n else:\n rds_record.sg = None\n rds_record.save()\n\n # grant the new security group to the instance\n payload.update({\"action\": apply_action, \"version\": version,\n \"server\": rds_ins_uuid, \"security_group\": sg_uuid})\n # grant_resp = api.get(payload=payload, timeout=10)\n grant_resp = api.get(payload=payload)\n\n if grant_resp.get(\"code\") != 0:\n logger.debug(\"the resp of granting the new securty group is:\" +\n str(grant_resp))\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = grant_resp.get(\"msg\")\n logger.error(\n \"security_group with sg_id \" + sg_id +\n \" cannot apply to rds with rds_id \" + rds_id)\n else:\n try:\n rds_record.sg = RdsSecurityGroupModel.\\\n 
get_security_by_id(sg_id)\n rds_record.save()\n except Exception as exp:\n logger.error(\"cannot save grant sg to rds to db, {}\".\n format(exp.message))\n else:\n sg_results_succ.append(sg_id)\n result_data.update({rds_id: sg_results_succ})\n resp = console_response(code, msg, len(result_data.keys()), [result_data])\n return resp", "def run(self, image, replicas, scale_replicas, command=None,\n status_wait=True):\n namespace = self.choose_namespace()\n\n name = self.client.create_statefulset(\n namespace=namespace,\n replicas=replicas,\n image=image,\n command=command,\n status_wait=status_wait\n )\n\n self.client.scale_statefulset(\n name,\n namespace=namespace,\n replicas=scale_replicas,\n status_wait=status_wait\n )\n\n self.client.scale_statefulset(\n name,\n namespace=namespace,\n replicas=replicas,\n status_wait=status_wait\n )\n\n self.client.delete_statefulset(\n name=name,\n namespace=namespace,\n status_wait=status_wait\n )", "def test_patch_hyperflex_cluster_profile(self):\n pass", "def get_replica_ips(self):\n return self.membership", "def cluster_run(self, cmd):\n instances = self.service.get_instances()\n responses = []\n for instance in instances:\n success, output = self.run_remote_script(cmd, instance=instance)\n responses.append((success, output))\n return responses", "def _standby_clone():\n # manualy:\n # $ mkdir -p /var/lib/postgresql/9.1/testscluster/\n # $ rsync -avz --rsh='ssh -p2222' root@12.34.56.789:/var/lib/postgresql/9.1/testscluster/ /var/lib/postgresql/9.1/testscluster/\n\n with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):\n puts(green('Start cloning the master'))\n repmgr_clone_command = 'repmgr -D %(slave_pgdata_path)s -d %(sync_db)s -p %(cluster_port)s -U %(sync_user)s -R postgres --verbose standby clone %(pgmaster_ip)s' % env\n puts(green(repmgr_clone_command))\n puts(\"-\" * 40)\n res = sudo(repmgr_clone_command, user='postgres')\n if 'Can not connect to the remote host' in res or 'Connection to database failed' in res:\n puts(\"-\" * 40)\n puts(green(repmgr_clone_command))\n puts(\"-\" * 40)\n puts(\"Master server is %s reachable.\" % red(\"NOT\"))\n puts(\"%s you can try to CLONE the slave manually [%s]:\" % (green(\"BUT\"), red(\"at your own risk\")))\n puts(\"On the slave server:\")\n puts(\"$ sudo -u postgres rsync -avz --rsh='ssh -p%(master_ssh_port)s' postgres@%(pgmaster_ip)s:%(master_pgdata_path)s %(slave_pgdata_path)s --exclude=pg_xlog* --exclude=pg_control --exclude=*.pid\" % env)\n puts(\"Here:\")\n puts(\"$ fab <cluster_task_name> finish_configuring_slave\")\n abort(\"STOP...\")", "def test_replace_cluster_role_binding(self):\n pass", "def share_replica_delete(context, share_replica_id, session=None,\n need_to_update_usages=True):\n session = session or get_session()\n\n share_instance_delete(context, share_replica_id, session=session,\n need_to_update_usages=need_to_update_usages)", "def test_list_cluster_role(self):\n pass", "def copy(self):\n new_client = self._client.copy()\n return self.__class__(self.instance_id, new_client,\n self._cluster_location_id,\n display_name=self.display_name)", "def resource_type(self):\n return 'cluster'", "def test_patch_hyperflex_cluster_network_policy(self):\n pass", "def test_unassign_configuration_from_instances(self):\n instance_info.dbaas.instances.update(configuration_instance.id,\n remove_configuration=True)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)\n\n instance_info.dbaas.instances.update(instance_info.id,\n 
remove_configuration=True)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)\n instance_info.dbaas.instances.get(instance_info.id)\n\n def result_has_no_configuration():\n instance = instance_info.dbaas.instances.get(inst_info.id)\n if hasattr(instance, 'configuration'):\n return False\n else:\n return True\n\n inst_info = instance_info\n poll_until(result_has_no_configuration)\n inst_info = configuration_instance\n poll_until(result_has_no_configuration)\n\n instance = instance_info.dbaas.instances.get(instance_info.id)\n assert_equal('RESTART_REQUIRED', instance.status)", "def test_snat_pod_restart(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2, client3, client4)\n assert self.restart_pod(client1[0])\n assert self.restart_pod(client2[0])\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def preflight(self, connection):\n return True", "def initialize_replicas(self):\n try:\n self.replicas+1\n except:\n print \"Ensemble MD Toolkit Error: Number of replicas must be \\\n defined for pattern ReplicaExchange!\"\n raise\n\n\n replicas = []\n N = self.replicas\n for k in range(N):\n r = ReplicaP(k)\n replicas.append(r)\n\n return replicas", "def test_no_sync_correctness(self):\n self.run_subtests(\n {\n \"sharding_strategy\": [\n ShardingStrategy.FULL_SHARD,\n ShardingStrategy.SHARD_GRAD_OP,\n ShardingStrategy.NO_SHARD,\n ],\n },\n self._test_no_sync_correctness,\n )", "def run_instance():\n ami_id = \"ami-04876f29fd3a5e8ba\" # AMI Id\n instance_type = \"t2.micro\" # Instance Type\n tag_specs = [\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': 'Name',\n 'Value': 'BoB10@ProductDev.sdk.8084'\n }\n ]\n }\n ]\n\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Run an instance\n\n instances = ec2_resource.create_instances(ImageId=ami_id, InstanceType=instance_type,\n MaxCount=1, MinCount=1, KeyName='BoB10@ProductDev.8084',\n TagSpecifications=tag_specs, \n SecurityGroupIds=['sg-06b757b4bb272d98f'],\n UserData=assemble_userdata().as_string())\n Instance_id = instances[0].id\n print('\\nInstance Id: ' + Instance_id)\n print('Image Id: ' + instances[0].image_id)\n print('Instance Type: ' + instances[0].instance_type)\n print('State: ' + instances[0].state['Name'])\n return Instance_id", "def cluster_reboot(cluster):\n map(reboot, cluster)", "def test_update_instance_limit1(self):\n pass", "def failover_host(self, context, volumes, secondary_id=None, groups=None):\n volume_updates = []\n back_end_ip = None\n svc_host = volume_utils.extract_host(self.host, 'backend')\n service = objects.Service.get_by_args(context, svc_host,\n 'cinder-volume')\n\n if secondary_id and secondary_id != self.replica.backend_id:\n LOG.error(\"Kaminario driver received failover_host \"\n \"request, But backend is non replicated device\")\n raise exception.UnableToFailOver(reason=_(\"Failover requested \"\n \"on non replicated \"\n \"backend.\"))\n\n if (service.active_backend_id and\n service.active_backend_id != self.configuration.san_ip):\n self.snap_updates = []\n rep_volumes = []\n # update status for non-replicated primary volumes\n for v in volumes:\n vol_name = self.get_volume_name(v['id'])\n vol = self.client.search(\"volumes\", name=vol_name)\n if v.replication_status 
!= K2_REP_FAILED_OVER and vol.total:\n status = 'available'\n if v.volume_attachment:\n map_rs = self.client.search(\"mappings\",\n volume=vol.hits[0])\n status = 'in-use'\n if map_rs.total:\n map_rs.hits[0].delete()\n volume_updates.append({'volume_id': v['id'],\n 'updates':\n {'status': status}})\n else:\n rep_volumes.append(v)\n\n # In-sync from secondaray array to primary array\n for v in rep_volumes:\n vol_name = self.get_volume_name(v['id'])\n vol = self.client.search(\"volumes\", name=vol_name)\n rvol_name = self.get_rep_name(vol_name)\n rvol = self.target.search(\"volumes\", name=rvol_name)\n session_name = self.get_session_name(v['id'])\n rsession_name = self.get_rep_name(session_name)\n ssn = self.target.search(\"replication/sessions\",\n name=rsession_name)\n if ssn.total:\n tgt_ssn = ssn.hits[0]\n ssn = self.client.search(\"replication/sessions\",\n name=session_name)\n if ssn.total:\n src_ssn = ssn.hits[0]\n\n if (tgt_ssn.state == 'failed_over' and\n tgt_ssn.current_role == 'target' and vol.total and src_ssn):\n map_rs = self.client.search(\"mappings\", volume=vol.hits[0])\n if map_rs.total:\n map_rs.hits[0].delete()\n tgt_ssn.state = 'in_sync'\n tgt_ssn.save()\n self._check_for_status(src_ssn, 'in_sync')\n if (rvol.total and src_ssn.state == 'in_sync' and\n src_ssn.current_role == 'target'):\n gen_no = self._create_volume_replica_user_snap(self.target,\n tgt_ssn)\n self.snap_updates.append({'tgt_ssn': tgt_ssn,\n 'gno': gen_no,\n 'stime': time.time()})\n LOG.debug(\"The target session: %s state is \"\n \"changed to in sync\", rsession_name)\n\n self._is_user_snap_sync_finished()\n\n # Delete secondary volume mappings and create snapshot\n for v in rep_volumes:\n vol_name = self.get_volume_name(v['id'])\n vol = self.client.search(\"volumes\", name=vol_name)\n rvol_name = self.get_rep_name(vol_name)\n rvol = self.target.search(\"volumes\", name=rvol_name)\n session_name = self.get_session_name(v['id'])\n rsession_name = self.get_rep_name(session_name)\n ssn = self.target.search(\"replication/sessions\",\n name=rsession_name)\n if ssn.total:\n tgt_ssn = ssn.hits[0]\n ssn = self.client.search(\"replication/sessions\",\n name=session_name)\n if ssn.total:\n src_ssn = ssn.hits[0]\n if (rvol.total and src_ssn.state == 'in_sync' and\n src_ssn.current_role == 'target'):\n map_rs = self.target.search(\"mappings\",\n volume=rvol.hits[0])\n if map_rs.total:\n map_rs.hits[0].delete()\n gen_no = self._create_volume_replica_user_snap(self.target,\n tgt_ssn)\n self.snap_updates.append({'tgt_ssn': tgt_ssn,\n 'gno': gen_no,\n 'stime': time.time()})\n self._is_user_snap_sync_finished()\n # changing source sessions to failed-over\n for v in rep_volumes:\n vol_name = self.get_volume_name(v['id'])\n vol = self.client.search(\"volumes\", name=vol_name)\n rvol_name = self.get_rep_name(vol_name)\n rvol = self.target.search(\"volumes\", name=rvol_name)\n session_name = self.get_session_name(v['id'])\n rsession_name = self.get_rep_name(session_name)\n ssn = self.target.search(\"replication/sessions\",\n name=rsession_name)\n if ssn.total:\n tgt_ssn = ssn.hits[0]\n ssn = self.client.search(\"replication/sessions\",\n name=session_name)\n if ssn.total:\n src_ssn = ssn.hits[0]\n if (rvol.total and src_ssn.state == 'in_sync' and\n src_ssn.current_role == 'target'):\n src_ssn.state = 'failed_over'\n src_ssn.save()\n self._check_for_status(tgt_ssn, 'suspended')\n LOG.debug(\"The target session: %s state is \"\n \"changed to failed over\", session_name)\n\n src_ssn.state = 'in_sync'\n src_ssn.save()\n 
LOG.debug(\"The target session: %s state is \"\n \"changed to in sync\", session_name)\n rep_status = fields.ReplicationStatus.DISABLED\n volume_updates.append({'volume_id': v['id'],\n 'updates':\n {'replication_status': rep_status}})\n\n back_end_ip = self.configuration.san_ip\n else:\n \"\"\"Failover to replication target.\"\"\"\n for v in volumes:\n vol_name = self.get_volume_name(v['id'])\n rv = self.get_rep_name(vol_name)\n if self.target.search(\"volumes\", name=rv).total:\n self._failover_volume(v)\n volume_updates.append(\n {'volume_id': v['id'],\n 'updates':\n {'replication_status': K2_REP_FAILED_OVER}})\n else:\n volume_updates.append({'volume_id': v['id'],\n 'updates': {'status': 'error', }})\n back_end_ip = self.replica.backend_id\n return back_end_ip, volume_updates, []", "def test_crud_autoscaler(self):\n # create the parent cluster\n cluster_id = self._create_cluster()\n\n # create cluster autoscaler\n response = self._create_autoscaler(cluster_id)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)\n\n # list existing objects\n autoscaler_id = self._list_autoscalers(cluster_id)\n\n # update autoscaler\n response = self._update_autoscaler(cluster_id, autoscaler_id)\n self.assertDictContainsSubset(self.AUTOSCALER_UPDATE_DATA, response)\n\n # check it exists\n autoscaler_id = self._check_autoscaler_exists(cluster_id, autoscaler_id)\n\n # delete the object\n response = self._delete_autoscaler(cluster_id, autoscaler_id)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n # check it no longer exists\n self._check_no_autoscalers_exist(cluster_id)", "def replica_configuration(self) -> 'outputs.ReplicaConfigurationResponse':\n return pulumi.get(self, \"replica_configuration\")", "def failover_replicate(self, req, id, body):\n LOG.info(_LI(\"Failover volume's replicate, volume_id: %s\"), id)\n context = req.environ['sgservice.context']\n params = body.get('failover_replicate', {})\n\n checkpoint_id = params.get('checkpoint_id', None)\n force = params.get('force', False)\n volume = self.service_api.get(context, id)\n replicate_info = self.service_api.failover_replicate(\n context, volume, checkpoint_id, force)\n return self._view_builder.action_summary(req, replicate_info)", "def enable_instance_modification(self):\n self._request({\"enable-instance-modification\": True})", "def test_create_cluster_resource_quota(self):\n pass", "def test_delete_cluster_policy(self):\n pass", "def test_01_dedicated_cluster_allocation(self):\n\n # Step 1\n dedicateCmd = dedicateCluster.dedicateClusterCmd()\n dedicateCmd.clusterid = self.clusters[0].id\n dedicateCmd.domainid = self.domain.id\n dedicateCmd.account = self.account_1.name\n self.apiclient.dedicateCluster(dedicateCmd)\n\n afcmd = listAffinityGroups.listAffinityGroupsCmd()\n afcmd.account = self.account_1.name\n afcmd.domainid = self.account_1.domainid\n affinitygr_list = self.apiclient.listAffinityGroups(afcmd)\n\n # Step 2\n self.vm = VirtualMachine.create(\n self.userapiclient_1,\n self.testdata[\"small\"],\n templateid=self.template.id,\n accountid=self.account_1.name,\n domainid=self.account_1.domainid,\n serviceofferingid=self.service_offering.id,\n affinitygroupids=[affinitygr_list[0].id],\n zoneid=self.zone.id,\n mode=self.zone.networktype\n )\n # Steps to verify if VM is created on dedicated account\n vmlist = list_virtual_machines(self.apiclient,\n id=self.vm.id)\n\n hostlist = list_hosts(self.apiclient,\n id=vmlist[0].hostid)\n\n self.assertEqual(hostlist[0].clusterid,\n 
self.clusters[0].id,\n \"check if vm gets deployed on dedicated clusture\"\n )\n # Step 3\n self.vm_1 = VirtualMachine.create(\n self.userapiclient_2,\n self.testdata[\"small\"],\n templateid=self.template.id,\n accountid=self.account_2.name,\n domainid=self.account_2.domainid,\n serviceofferingid=self.service_offering.id,\n zoneid=self.zone.id,\n mode=self.zone.networktype\n )\n\n # Steps to verify if VM is created on dedicated account\n vmlist_1 = list_virtual_machines(self.apiclient,\n id=self.vm_1.id)\n\n hostlist_1 = list_hosts(self.apiclient,\n id=vmlist_1[0].hostid)\n\n self.assertNotEqual(hostlist_1[0].clusterid,\n self.clusters[0].id,\n \"check if vm gets deployed on correct clusture\"\n )\n\n # Step 4\n routerList = list_routers(self.apiclient,\n clusterid=self.clusters[0].id,\n networkid=self.vm_1.nic[0].networkid\n )\n self.assertEqual(\n routerList,\n None,\n \"Check Dedicated cluster is used for virtual routers \\\n that belong to non-dedicated account\")\n\n return", "def test_snat_with_kubelet_restart_on_slave(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"kubelet\",\n host_ips = self.inputs.k8s_slave_ips)\n time.sleep(30) # Wait timer for all kubernetes pods to stablise.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)" ]
[ "0.6235598", "0.6034554", "0.5694733", "0.5646017", "0.56440216", "0.54747474", "0.5452212", "0.53970134", "0.53528446", "0.5319955", "0.52906865", "0.5269184", "0.523622", "0.5152003", "0.51320475", "0.50894994", "0.5066048", "0.50578713", "0.50511944", "0.5039058", "0.5033668", "0.50330997", "0.5004842", "0.50030345", "0.49963602", "0.4995974", "0.49865982", "0.49673122", "0.4951828", "0.49394482", "0.4938433", "0.49033666", "0.48949558", "0.4886562", "0.4885587", "0.48833707", "0.48725247", "0.48711684", "0.48439056", "0.4841425", "0.4839226", "0.48380563", "0.48333198", "0.48204106", "0.48185363", "0.48165423", "0.4813661", "0.48123437", "0.4803667", "0.48026344", "0.4796802", "0.47883546", "0.47881228", "0.47649378", "0.47643968", "0.47617927", "0.47302407", "0.4723675", "0.471896", "0.47074193", "0.46965697", "0.46936017", "0.46926036", "0.46913958", "0.46910277", "0.46905157", "0.46831277", "0.46791822", "0.46774694", "0.46753186", "0.46730226", "0.46706218", "0.46637732", "0.46635908", "0.46627632", "0.46559414", "0.46557987", "0.46494275", "0.46485764", "0.46482638", "0.46394807", "0.46356714", "0.46339294", "0.4630341", "0.46280196", "0.46255767", "0.46248707", "0.46246627", "0.46181569", "0.46125743", "0.46041498", "0.460292", "0.46026477", "0.4602283", "0.45976254", "0.45908973", "0.45833734", "0.45805952", "0.45797732", "0.45767343", "0.45719993" ]
0.0
-1
This operation is applicable to replica set instances and standalone instances, but not to sharded cluster instances.
async def describe_replica_set_role_with_options_async( self, request: dds_20151201_models.DescribeReplicaSetRoleRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.DescribeReplicaSetRoleResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='DescribeReplicaSetRole', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.DescribeReplicaSetRoleResponse(), await self.call_api_async(params, req, runtime) )
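The document field above is the async form of the DescribeReplicaSetRole call generated by the SDK. As a minimal, hedged sketch of how such a method might be driven by a caller (the package import paths, endpoint, credentials, and instance ID below are assumptions added for illustration and are not part of the dataset row):

```python
import asyncio

from alibabacloud_dds20151201.client import Client
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util import models as util_models


async def main() -> None:
    # Assumed credentials and endpoint, for illustration only.
    config = open_api_models.Config(
        access_key_id='<access-key-id>',
        access_key_secret='<access-key-secret>',
        endpoint='mongodb.aliyuncs.com',
    )
    client = Client(config)

    # Per the query sentence above, DescribeReplicaSetRole applies to replica
    # set and standalone instances, not to sharded cluster instances.
    request = dds_20151201_models.DescribeReplicaSetRoleRequest(
        dbinstance_id='dds-bp1xxxxxxxxxxxxx',  # hypothetical instance ID
    )
    runtime = util_models.RuntimeOptions()
    response = await client.describe_replica_set_role_with_options_async(
        request, runtime)
    print(response.body)


if __name__ == '__main__':
    asyncio.run(main())
```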
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
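The metadata above declares a triplet objective over (query, document, negatives); the negatives list for this row follows. As a hedged sketch only (the use of sentence-transformers and the in-memory row are illustrative assumptions, not something the dataset specifies), one way such a row could be turned into training triplets:

```python
from sentence_transformers import InputExample

# Hypothetical in-memory stand-in for one dataset row; real rows are loaded
# from the dataset files and the strings here are abbreviated placeholders.
row = {
    "query": "This operation is applicable to replica set instances and "
             "standalone instances, but not to sharded cluster instances.",
    "document": "async def describe_replica_set_role_with_options_async(...): ...",
    "negatives": [
        "def replicaof(self, *args, **kwargs) -> NoReturn: ...",
        "def test_patch_cluster_role(self): ...",
    ],
}

# One (anchor, positive, negative) triplet per negative, mirroring the
# "triplet": [["query", "document", "negatives"]] objective in the metadata.
triplets = [
    InputExample(texts=[row["query"], row["document"], negative])
    for negative in row["negatives"]
]

print(f"Built {len(triplets)} training triplets from one row")
```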
[ "def replicaof(self, *args, **kwargs) -> NoReturn:\n raise RedisClusterException(\"REPLICAOF is not supported in cluster mode\")", "def modifyMastersWithMultipleInstances(self):\n # Nothing to do\n pass", "def test_patch_cluster_role(self):\n pass", "def cluster(self):\n assert False", "def replica_set_name(self):\n ...", "def test_redis_increase_replica_count_usual_case():", "def test_replace_cluster_role(self):\n pass", "def is_replica(self):\n if (\n self.replica\n and self.aip\n and not self.deleted\n and not self.sip\n and not self.dip\n ):\n return True\n return False", "def test_patch_hyperflex_cluster(self):\n pass", "def test_patch_cluster_policy(self):\n pass", "def test_replace_cluster_policy(self):\n pass", "def mmo_execute_on_secondary_or_primary(self, mmo_connection, command, replicaset=\"all\", first_available_only=False): # TODO add execution database?\n cluster_command_output = []\n replsets_completed = set()\n all_replsets = set()\n repl_hosts = self.mmo_shard_servers(mmo_connection)\n for doc in repl_hosts:\n hostname, port, shard = doc[\"hostname\"], doc[\"port\"], doc[\"shard\"]\n all_replsets.add(shard)\n auth_dic = self.mmo_get_auth_details_from_connection(mmo_connection)\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"],\n auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"SECONDARY\" \\\n and (replicaset == \"all\" or replicaset == shard) \\\n and shard not in replsets_completed:\n secondary_found = True\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append(\n {\"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": command_output})\n #if first_available_only:\n replsets_completed.add(shard)\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": shard,\n \"command_output\": {\"Error\": \"This mongod process is not available\"}})\n else:\n raise excep\n # This is the case where there are no secondaries\n for missing_shard in (all_replsets^replsets_completed):\n for doc in repl_hosts:\n hostname, port, shard = doc[\"hostname\"], doc[\"port\"], doc[\"shard\"]\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"],\n auth_dic[\"authentication_database\"])\n if shard == missing_shard and self.mmo_replica_state(c)[\"name\"] == \"PRIMARY\":\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": command_output})\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": shard,\n \"command_output\": {\"Error\": \"This mongod process is not available\"}})\n else:\n raise excep\n return cluster_command_output", "def prepare_replica_for_exchange(self, replica):\n pass", "def test_base_replica_repair_with_contention(self):\n self._base_replica_repair_test(fail_mv_lock=True)", "def dvs_instances_batch_mix_sg(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n self.show_step(2)\n net_1 = 
os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network is created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(3)\n self.show_step(4)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n self.icmp[\"security_group_rule\"][\"security_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"remote_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n self.icmp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n\n for sg in [sg1, sg2]:\n self.tcp[\"security_group_rule\"][\"security_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"remote_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n self.tcp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n\n # Add rules for ssh and ping\n os_conn.goodbye_security()\n _s_groups = os_conn.neutron.list_security_groups()['security_groups']\n _srv_tenant_id = os_conn.get_tenant(SERVTEST_TENANT).id\n default_sg = [sg for sg in _s_groups\n if sg['tenant_id'] == _srv_tenant_id and\n sg['name'] == 'default'][0]\n\n for step in (5, 9):\n self.show_step(step) # step 5, 9\n step += 1\n self.show_step(step) # step 6, 10\n step += 1\n\n openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=vm_count,\n security_groups=[sg1.name, sg2.name, default_sg['name']])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(step) # step 7, 11\n step += 1\n\n instances = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, instances)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair,\n timeout=60 * 5,\n interval=10)\n\n self.show_step(step) # step 8, 12\n step += 1\n\n for instance in instances:\n os_conn.nova.servers.delete(instance)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(instance)", "def crash_safe_replication(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"crash_safe_replication\")", "def test_update_hyperflex_cluster(self):\n pass", "def test_snat_with_master_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n 
self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def scale_app(self, name, replicas):\n raise NotImplementedError", "def mmo_execute_on_secondaries(self, mmo_connection, command, replicaset=\"all\", first_available_only=False): # TODO add execution database?\n cluster_command_output = []\n replsets_completed = []\n\n auth_dic = self.mmo_get_auth_details_from_connection(mmo_connection)\n\n if replicaset == self.config_server_repl_name: # Config server replset\n for doc in self.mmo_config_servers(mmo_connection):\n hostname, port = doc[\"hostname\"], doc[\"port\"]\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"],auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"SECONDARY\":\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": command_output})\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": {\"Error\": \"mongod process is not up\"}})\n else:\n raise excep\n else:\n for doc in self.mmo_shard_servers(mmo_connection):\n hostname, port, shard = doc[\"hostname\"], doc[\"port\"], doc[\"shard\"]\n try:\n\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"], auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"SECONDARY\" \\\n and (replicaset == \"all\" or replicaset == shard)\\\n and shard not in replsets_completed:\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({ \"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": command_output })\n if first_available_only:\n replsets_completed.append(shard)\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": {\"Error\": \"This mongod process is not available\"}})\n else:\n raise excep\n return cluster_command_output", "def test_replace_cluster_resource_quota_status(self):\n pass", "def test_replace_cluster_resource_quota(self):\n pass", "def test_read_cluster_role(self):\n pass", "def test_patch_cluster_resource_quota(self):\n pass", "def replica_catalog(train_tweets_path, val_tweets_path, test_tweets_path, dataset_images, EMBEDDING_BASE_PATH):\n rc = ReplicaCatalog()\n http_location = \"https://workflow.isi.edu/Panorama/Data/CrisisComputing\"\n\n # list of input file objects\n input_images = []\n\n # Adding Images to the replica catalogue\n for image_path in dataset_images:\n name = image_path[1].split(\"/\")[-1]\n image_file = File(name)\n input_images.append(image_file)\n \n path_split = image_path[0].split(\"/\")\n #rc.add_replica(\"local\", image_file, image_path[0])\n rc.add_replica(\"isi\", image_file, os.path.join(http_location, path_split[-2], path_split[-1]))\n\n \n glove_embeddings = File('glove.twitter.27B.200d.txt')\n \n # File objects for train, val and test tweets csv\n train_tweets_name = File(train_tweets_path.split('/')[-1])\n val_tweets_name = File(val_tweets_path.split('/')[-1])\n test_tweets_name = File(test_tweets_path.split('/')[-1])\n \n #rc.add_replica(\"local\", train_tweets_name, train_tweets_path)\n #rc.add_replica(\"local\", val_tweets_name, val_tweets_path)\n 
#rc.add_replica(\"local\", test_tweets_name, test_tweets_path)\n #rc.add_replica(\"local\", glove_embeddings, os.path.join(os.getcwd(), os.path.join(EMBEDDING_BASE_PATH, GLOVE_EMBEDDING_FILE))) \n \n path_split = train_tweets_path.split('/')\n rc.add_replica(\"isi\", train_tweets_name, os.path.join(http_location, path_split[-2], path_split[-1]))\n path_split = val_tweets_path.split('/')\n rc.add_replica(\"isi\", val_tweets_name, os.path.join(http_location, path_split[-2], path_split[-1]))\n path_split = test_tweets_path.split('/')\n rc.add_replica(\"isi\", test_tweets_name, os.path.join(http_location, path_split[-2], path_split[-1]))\n \n rc.add_replica(\"isi\", glove_embeddings, os.path.join(http_location, \"glove_twitter\", GLOVE_EMBEDDING_FILE)) \n rc.write()\n\n return input_images, train_tweets_name, val_tweets_name, test_tweets_name, glove_embeddings", "def mmo_execute_on_primaries(self, mmo_connection, command, replicaset=\"all\"): # TODO add execution database?\n\n auth_dic = self.mmo_get_auth_details_from_connection(mmo_connection)\n\n cluster_command_output = []\n if replicaset == self.config_server_repl_name: # Config server replset\n for doc in self.mmo_config_servers(mmo_connection):\n hostname, port = doc[\"hostname\"], doc[\"port\"]\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"],auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"PRIMARY\":\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": command_output})\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": {\"Error\": \"mongod process is not up\"}})\n else:\n raise excep\n else:\n for doc in self.mmo_shard_servers(mmo_connection):\n hostname, port, shard = doc[\"hostname\"], doc[\"port\"], doc[\"shard\"]\n\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"], auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"PRIMARY\" and (replicaset == \"all\" or replicaset == shard):\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({ \"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": command_output })\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({ \"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": { \"Error\": \"mongod process is not up\" } })\n else:\n raise excep\n return cluster_command_output", "def become_replica(ticket, initiated_by):\n assert model.is_standalone()\n\n # On dev appserver emulate X-Appengine-Inbound-Appid header.\n headers = {'Content-Type': 'application/octet-stream'}\n protocol = 'https'\n if utils.is_local_dev_server():\n headers['X-Appengine-Inbound-Appid'] = app_identity.get_application_id()\n protocol = 'http'\n headers['X-URLFetch-Service-Id'] = utils.get_urlfetch_service_id()\n\n # Pass back the ticket for primary to verify it, tell the primary to use\n # default version hostname to talk to us.\n link_request = replication_pb2.ServiceLinkRequest()\n link_request.ticket = ticket.ticket\n link_request.replica_url = (\n '%s://%s' % (protocol, app_identity.get_default_version_hostname()))\n link_request.initiated_by = initiated_by.to_bytes()\n\n # 
Primary will look at X-Appengine-Inbound-Appid and compare it to what's in\n # the ticket.\n try:\n result = urlfetch.fetch(\n url='%s/auth_service/api/v1/internal/link_replica' % ticket.primary_url,\n payload=link_request.SerializeToString(),\n method='POST',\n headers=headers,\n follow_redirects=False,\n deadline=30,\n validate_certificate=True)\n except urlfetch.Error as exc:\n raise ProtocolError(\n replication_pb2.ServiceLinkResponse.TRANSPORT_ERROR,\n 'URLFetch error (%s): %s' % (exc.__class__.__name__, exc))\n\n # Protobuf based protocol is not using HTTP codes (handler always replies with\n # HTTP 200, providing error details if needed in protobuf serialized body).\n # So any other status code here means there was a transport level error.\n if result.status_code != 200:\n raise ProtocolError(\n replication_pb2.ServiceLinkResponse.TRANSPORT_ERROR,\n 'Request to the primary failed with HTTP %d.' % result.status_code)\n\n link_response = replication_pb2.ServiceLinkResponse.FromString(result.content)\n if link_response.status != replication_pb2.ServiceLinkResponse.SUCCESS:\n message = LINKING_ERRORS.get(\n link_response.status,\n 'Request to the primary failed with status %d.' % link_response.status)\n raise ProtocolError(link_response.status, message)\n\n # Become replica. Auth DB will be overwritten on a first push from Primary.\n state = model.AuthReplicationState(\n key=model.replication_state_key(),\n primary_id=ticket.primary_id,\n primary_url=ticket.primary_url)\n state.put()", "def failover_replica(self) -> 'outputs.InstanceFailoverReplicaResponse':\n return pulumi.get(self, \"failover_replica\")", "def test_patch_cluster_resource_quota_status(self):\n pass", "def dvs_instances_one_group(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(2)\n self.show_step(3)\n openstack.create_instances(os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n vm_count=vm_count,\n security_groups=[security_group.name])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(4)\n srv_list = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, srv_list)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair)\n\n self.show_step(5)\n for srv in srv_list:\n os_conn.nova.servers.delete(srv)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(srv)", "def enable_replicate(self, req, id, body):\n LOG.info(_LI(\"Enable volume's replicate, volume_id: %s\"), id)\n context = req.environ['sgservice.context']\n volume = self.service_api.get(context, id)\n replicate_info = self.service_api.enable_replicate(context, volume)\n return self._view_builder.action_summary(req, replicate_info)", "def test_read_cluster_policy(self):\n pass", "def is_replicated():\n if tf.distribute.has_strategy() and 
tf.distribute.get_replica_context():\n return tf.distribute.get_strategy().num_replicas_in_sync > 1\n return get_tf_replicator() is not None or is_tpu_replicated()", "def test_create_cluster_role(self):\n pass", "def test_index_nas_shares_by_pool(self):\n pass", "def delete_cluster(self):", "def check_smartstack_replication_for_instance(\n instance_config,\n expected_count,\n smartstack_replication_checker,\n):\n\n crit_threshold = instance_config.get_replication_crit_percentage()\n\n log.info('Checking instance %s in smartstack', instance_config.job_id)\n smartstack_replication_info = \\\n smartstack_replication_checker.get_replication_for_instance(instance_config)\n\n log.debug('Got smartstack replication info for %s: %s' %\n (instance_config.job_id, smartstack_replication_info))\n\n if len(smartstack_replication_info) == 0:\n status = pysensu_yelp.Status.CRITICAL\n output = (\n 'Service %s has no Smartstack replication info. Make sure the discover key in your smartstack.yaml '\n 'is valid!\\n'\n ) % instance_config.job_id\n log.error(output)\n else:\n expected_count_per_location = int(expected_count / len(smartstack_replication_info))\n output = ''\n output_critical = ''\n output_ok = ''\n under_replication_per_location = []\n\n for location, available_backends in sorted(smartstack_replication_info.items()):\n num_available_in_location = available_backends.get(instance_config.job_id, 0)\n under_replicated, ratio = is_under_replicated(\n num_available_in_location, expected_count_per_location, crit_threshold,\n )\n if under_replicated:\n output_critical += '- Service %s has %d out of %d expected instances in %s (CRITICAL: %d%%)\\n' % (\n instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,\n )\n else:\n output_ok += '- Service %s has %d out of %d expected instances in %s (OK: %d%%)\\n' % (\n instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,\n )\n under_replication_per_location.append(under_replicated)\n\n output += output_critical\n if output_critical and output_ok:\n output += '\\n\\n'\n output += 'The following locations are OK:\\n'\n output += output_ok\n\n if any(under_replication_per_location):\n status = pysensu_yelp.Status.CRITICAL\n output += (\n \"\\n\\n\"\n \"What this alert means:\\n\"\n \"\\n\"\n \" This replication alert means that a SmartStack powered loadbalancer (haproxy)\\n\"\n \" doesn't have enough healthy backends. Not having enough healthy backends\\n\"\n \" means that clients of that service will get 503s (http) or connection refused\\n\"\n \" (tcp) when trying to connect to it.\\n\"\n \"\\n\"\n \"Reasons this might be happening:\\n\"\n \"\\n\"\n \" The service may simply not have enough copies or it could simply be\\n\"\n \" unhealthy in that location. There also may not be enough resources\\n\"\n \" in the cluster to support the requested instance count.\\n\"\n \"\\n\"\n \"Things you can do:\\n\"\n \"\\n\"\n \" * You can view the logs for the job with:\\n\"\n \" paasta logs -s %(service)s -i %(instance)s -c %(cluster)s\\n\"\n \"\\n\"\n \" * Fix the cause of the unhealthy service. 
Try running:\\n\"\n \"\\n\"\n \" paasta status -s %(service)s -i %(instance)s -c %(cluster)s -vv\\n\"\n \"\\n\"\n \" * Widen SmartStack discovery settings\\n\"\n \" * Increase the instance count\\n\"\n \"\\n\"\n ) % {\n 'service': instance_config.service,\n 'instance': instance_config.instance,\n 'cluster': instance_config.cluster,\n }\n log.error(output)\n else:\n status = pysensu_yelp.Status.OK\n log.info(output)\n send_event(instance_config=instance_config, status=status, output=output)", "def run(self):\n client = self._get_client()\n metadata = self._metadata\n\n if str(metadata.get('cluster')) == str(self._cluster_id):\n if str(self._uuid) == str(self._node_id):\n #hypervisor_hostname = client.servers.find(\n # id=self._uuid).to_dict()['OS-EXT-SRV-ATTR:hypervisor_hostname']\n hypervisor_hostname = client.servers.find(\n id=self._uuid).to_dict()['OS-EXT-SRV-ATTR:host']\n data = {\n 'migrate': True,\n 'uuid': self._uuid,\n 'hypervisor_hostname': hypervisor_hostname,\n 'flavor_id': self._flavor\n }\n return Result(data)\n\n data = {\n 'migrate': False,\n 'uuid': self._uuid,\n 'hypervisor_hostname': '',\n 'flavor_id':self._flavor\n }\n return Result(data)", "def replica(self) -> str:\n return pulumi.get(self, \"replica\")", "def _same_instance(client1, client2):\n return client1._topology_settings.seeds == client2._topology_settings.seeds", "def test_delete_collection_cluster_policy(self):\n pass", "def use_read_replica_if_available(queryset):\r\n return queryset.using(\"read_replica\") if \"read_replica\" in settings.DATABASES else queryset", "def upsert(self):\n\n if self.cluster:\n self.cluster.upsert()\n else:\n super().upsert()", "def test_snat_with_nodes_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n for node in self.inputs.k8s_slave_ips:\n self.inputs.reboot(node)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def test_update_nas_share_by_pool(self):\n pass", "def cluster_replicate(\n self, target_nodes: \"TargetNodesT\", node_id: str\n ) -> ResponseT:\n return self.execute_command(\n \"CLUSTER REPLICATE\", node_id, target_nodes=target_nodes\n )", "def replication(self):\n return self._replication", "def test_patch_cluster_role_binding(self):\n pass", "def crash_safe_replication_enabled(self) -> bool:\n return pulumi.get(self, \"crash_safe_replication_enabled\")", "def test_patch_cluster_network(self):\n pass", "def test_rollback(self):\n os.system('rm config.txt; touch config.txt')\n test_oplog, primary_conn, mongos, solr = self.get_new_oplog()\n\n if not start_cluster():\n self.fail('Cluster could not be started successfully!')\n\n solr = DocManager()\n test_oplog.doc_manager = solr\n solr._delete() # equivalent to solr.delete(q='*: *')\n\n mongos['test']['test'].remove({})\n mongos['test']['test'].insert( \n {'_id': ObjectId('4ff74db3f646462b38000001'),\n 'name': 'paulie'},\n safe=True\n )\n while (mongos['test']['test'].find().count() != 1):\n time.sleep(1)\n cutoff_ts = test_oplog.get_last_oplog_timestamp()\n\n first_doc = {'name': 'paulie', '_ts': bson_ts_to_long(cutoff_ts),\n 'ns': 'test.test',\n '_id': ObjectId('4ff74db3f646462b38000001')}\n\n #try kill one, try restarting\n kill_mongo_proc(primary_conn.host, PORTS_ONE['PRIMARY'])\n\n new_primary_conn 
= Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))\n admin = new_primary_conn['admin']\n while admin.command(\"isMaster\")['ismaster'] is False:\n time.sleep(1)\n time.sleep(5)\n count = 0\n while True:\n try:\n mongos['test']['test'].insert({\n '_id': ObjectId('4ff74db3f646462b38000002'),\n 'name': 'paul'}, \n safe=True)\n break\n except OperationFailure:\n count += 1\n if count > 60:\n self.fail('Call to insert doc failed too many times')\n time.sleep(1)\n continue\n while (mongos['test']['test'].find().count() != 2):\n time.sleep(1)\n kill_mongo_proc(primary_conn.host, PORTS_ONE['SECONDARY'])\n start_mongo_proc(PORTS_ONE['PRIMARY'], \"demo-repl\", \"/replset1a\",\n \"/replset1a.log\", None)\n\n #wait for master to be established\n while primary_conn['admin'].command(\"isMaster\")['ismaster'] is False:\n time.sleep(1)\n\n start_mongo_proc(PORTS_ONE['SECONDARY'], \"demo-repl\", \"/replset1b\",\n \"/replset1b.log\", None)\n\n #wait for secondary to be established\n admin = new_primary_conn['admin']\n while admin.command(\"replSetGetStatus\")['myState'] != 2:\n time.sleep(1)\n while retry_until_ok(mongos['test']['test'].find().count) != 1:\n time.sleep(1)\n\n self.assertEqual(str(new_primary_conn.port), PORTS_ONE['SECONDARY'])\n self.assertEqual(str(primary_conn.port), PORTS_ONE['PRIMARY'])\n\n last_ts = test_oplog.get_last_oplog_timestamp()\n second_doc = {'name': 'paul', '_ts': bson_ts_to_long(last_ts),\n 'ns': 'test.test', \n '_id': ObjectId('4ff74db3f646462b38000002')}\n\n test_oplog.doc_manager.upsert(first_doc)\n test_oplog.doc_manager.upsert(second_doc)\n\n test_oplog.rollback()\n test_oplog.doc_manager.commit()\n results = solr._search()\n\n assert(len(results) == 1)\n\n self.assertEqual(results[0]['name'], 'paulie')\n self.assertTrue(results[0]['_ts'] <= bson_ts_to_long(cutoff_ts))\n\n #test_oplog.join()", "def replicas(self, replicas):\n\n self._replicas = replicas", "def disable_replicate(self, req, id, body):\n LOG.info(_LI(\"Disable volume's replicate, volume_id: %s\"), id)\n context = req.environ['sgservice.context']\n volume = self.service_api.get(context, id)\n replicate_info = self.service_api.disable_replicate(context, volume)\n return self._view_builder.action_summary(req, replicate_info)", "def check_replica_primary(con,host, warning, critical,perf_data):\n if warning is None and critical is None:\n warning=1\n warning=warning or 2\n critical=critical or 2\n\n primary_status=0\n message=\"Primary server has not changed\"\n db=con[\"nagios\"]\n data=get_server_status(con)\n current_primary=data['repl'].get('primary')\n saved_primary=get_stored_primary_server_name(db)\n if current_primary is None:\n current_primary = \"None\"\n if saved_primary is None:\n saved_primary = \"None\"\n if current_primary != saved_primary:\n last_primary_server_record = {\"server\": current_primary}\n db.last_primary_server.update({\"_id\": \"last_primary\"}, {\"$set\" : last_primary_server_record} , upsert=True, safe=True)\n message = \"Primary server has changed from %s to %s\" % (saved_primary, current_primary)\n primary_status=1\n return check_levels(primary_status,warning,critical,message)", "def start_wsrep_new_cluster(self):\r\n return execute(self._start_wsrep_new_cluster)", "def get_instance_OLD (self):\n instances = self.data['instances']\n if not len(instances) == 1:\n raise Exception, \"ArchivalObject: %d Instances found\" % len(instances)\n return instances[0]", "def test_assign_configuration_to_valid_instance(self):\n print(\"instance_info.id: %s\" % instance_info.id)\n 
print(\"configuration_info: %s\" % configuration_info)\n print(\"configuration_info.id: %s\" % configuration_info.id)\n config_id = configuration_info.id\n instance_info.dbaas.instances.modify(instance_info.id,\n configuration=config_id)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)", "def test_snat_with_docker_restart_on_master(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"docker\",\n host_ips = [self.inputs.k8s_master_ip])\n time.sleep(30)\n cluster_status, error_nodes = ContrailStatusChecker(self.inputs).wait_till_contrail_cluster_stable()\n assert cluster_status, 'All nodes and services not up. Failure nodes are: %s' % (\n error_nodes)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def test_replace_cluster_network(self):\n pass", "def failover_replica(self) -> pulumi.Output['outputs.InstanceFailoverReplicaResponse']:\n return pulumi.get(self, \"failover_replica\")", "def test_create_cluster_policy(self):\n pass", "def install_mongo ( vpc_conn, ec2_conn, cloudwatch_conn, vpc, base_name, aws_account_type, base_topicarn, params ):\n app_type = 'MONGO'\n mongo_keypair = get_keypair_name( aws_account_type, vpc.region.name, app_type )\n print mongo_keypair\n mongo_sec_grp_name = get_secgrp_name( base_name, app_type )\n \n print \"mongo_sec_grp_name\" + mongo_sec_grp_name\n \n mongo_sec_grp = find_secgrp(ec2_conn, mongo_sec_grp_name)\n print mongo_sec_grp\n \n if not mongo_sec_grp :\n mongo_sec_grp = create_secgrp( ec2_conn,\n vpc,\n mongo_sec_grp_name,\n 'Controls access to the ' + app_type )\n print mongo_sec_grp \n \n remove_all_rules( ec2_conn, [ mongo_sec_grp ],deep = True,base_name = base_name)\n grant_ssh_access( ec2_conn, [ mongo_sec_grp ], find_group( ec2_conn, base_name, 'NAT' ) )\n grant_grp_self_access ( ec2_conn, mongo_sec_grp, 0, 65535, protocol = 'tcp' )\n \n mongo_ami_name = base_name + '-' + app_type\n \n mongo_ami = get_ami_by_name( ec2_conn, mongo_ami_name )\n if not mongo_ami :\n mongo_ami = get_ami_by_name( ec2_conn, default_mongo_ami )\n\n print \"Creating mongoDB Instances\"\n \n mongo_subnets = get_vpc_subnets( vpc_conn, vpc, 'STATIC' )\n mongo_sec_grps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ mongo_sec_grp_name ] } )\n \n primary_ip = get_primary_node(mongo_subnets)\n mongo_instances = []\n mongo_config = get_mongo_config(mongo_subnets)\n \n user_data = None\n primary_instance_id = None\n primary_subnet = None\n for subnet in mongo_subnets :\n cidr_block = subnet.cidr_block\n \n if cidr_block == primary_ip :\n primary_subnet = subnet\n \n # First launch the secondary instances\n if cidr_block != primary_ip :\n instance_private_ip = get_static_ip(subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo )\n\n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( 
cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )\n\n user_data = base64.b64encode(mongo_config)\n \n print \"Launching primary mongo instance\"\n print \"primary_subnet \" +primary_subnet.cidr_block\n instance_private_ip = get_static_ip(primary_subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = primary_subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo)\n \n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )", "def test_non_additive_instance_creation(self):\n\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import launch_instances, run_block_device_dict, farm_security_group_setup\n\n # launch_instances requires vpc setup as done by firesim/scripts/setup_firesim.py\n from awstools.aws_setup import aws_setup\n aws_setup()\n farm_security_group_setup()\n\n type = 'f1.2xlarge'\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags = {'fsimcluster': 'testcluster'},\n always_expand=False)\n instances.should.have.length_of(1)\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags = {'fsimcluster': 'testcluster'},\n always_expand=False)\n instances.should.have.length_of(1)\n\n # There should be one instance total now, across one reservation\n ec2_client = boto3.client('ec2')\n paginator = ec2_client.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n all_reservations.should.have.length_of(1)\n\n [i for r in all_reservations for i in r['Instances']].should.have.length_of(1)", "def replace_with_insufficient_replicas_test(self):\n debug(\"Starting cluster with 3 nodes.\")\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n if DISABLE_VNODES:\n num_tokens = 1\n else:\n # a little hacky but grep_log returns the whole line...\n num_tokens = int(node3.get_conf_option('num_tokens'))\n\n debug(\"testing with num_tokens: {}\".format(num_tokens))\n\n debug(\"Inserting Data...\")\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=2)'])\n\n # stop node to replace\n debug(\"Stopping node to replace.\")\n node3.stop(wait_other_notice=True)\n\n # stop other replica\n debug(\"Stopping node2 (other replica)\")\n node2.stop(wait_other_notice=True)\n\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n cluster.add(node4, False)\n node4.start(replace_address='127.0.0.3', 
wait_for_binary_proto=False, wait_other_notice=False)\n\n # replace should fail due to insufficient replicas\n node4.watch_log_for(\"Unable to find sufficient sources for streaming range\")\n assert_not_running(node4)", "def set_cluster_autoscaler(enabled, worker_pool_names=None, new_worker_pool_names=None):\n modified_pools = []\n if k8s.exists('configmap', 'kube-system', 'iks-ca-configmap'):\n config_map = k8s.get('configmap', 'kube-system', 'iks-ca-configmap')\n worker_pools_config = json.loads(config_map['data']['workerPoolsConfig.json'])\n rename_worker_pools = new_worker_pool_names and worker_pool_names and len(new_worker_pool_names) == len(worker_pool_names)\n for pool_config in worker_pools_config:\n if not worker_pool_names or pool_config['name'] in worker_pool_names:\n if rename_worker_pools:\n pool_config['name'] = new_worker_pool_names[worker_pool_names.index(pool_config['name'])]\n pool_config['enabled'] = enabled\n modified_pools.append(pool_config['name'])\n elif pool_config['enabled'] != enabled:\n pool_config['enabled'] = enabled\n modified_pools.append(pool_config['name'])\n if modified_pools:\n config_map['data']['workerPoolsConfig.json'] = json.dumps(worker_pools_config, ensure_ascii=False) # TODO: Remove ensure_ascii when migration to py3 is complete\n k8s.apply(config_map)\n else:\n logger.info('Cluster autoscaler is not present')\n return modified_pools", "def test_patch_hyperflex_cluster_storage_policy(self):\n pass", "def test_snat_with_kubelet_restart_on_master(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"kubelet\",\n host_ips = [self.inputs.k8s_master_ip])\n time.sleep(30) # Wait timer for all kubernetes pods to stablise.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def test_delete_cluster_role(self):\n pass", "def _mix_replicas(self):\n logger.debug(\"Mixing replicas (does nothing for MultiStateSampler)...\")\n\n # Reset storage to keep track of swap attempts this iteration.\n self._n_accepted_matrix[:, :] = 0\n self._n_proposed_matrix[:, :] = 0\n\n # Determine fraction of swaps accepted this iteration.\n n_swaps_proposed = self._n_proposed_matrix.sum()\n n_swaps_accepted = self._n_accepted_matrix.sum()\n swap_fraction_accepted = 0.0\n if n_swaps_proposed > 0:\n swap_fraction_accepted = n_swaps_accepted / n_swaps_proposed # Python 3 uses true division for /\n logger.debug(\"Accepted {}/{} attempted swaps ({:.1f}%)\".format(n_swaps_accepted, n_swaps_proposed,\n swap_fraction_accepted * 100.0))\n return self._replica_thermodynamic_states", "def convert_container_to_replica(\n self,\n replica_name: str,\n active_container: docker.models.containers.Container,\n passive_container: docker.models.containers.Container) -> list[docker.models.images.Image]:\n new_replica_name = self.sanitize_replica_name(replica_name)\n replica_list = []\n container_list = [\n active_container, passive_container] if passive_container else [active_container]\n\n logger.info(\n f'Creating new replica image with name {new_replica_name}...')\n\n for container in container_list:\n try:\n self.client.images.remove(new_replica_name, force=True)\n except docker.errors.ImageNotFound:\n pass\n\n container_arch = container.name.split('_')[-1]\n\n # commit with arch tag\n replica = 
container.commit(\n repository=new_replica_name, tag=container_arch)\n replica_list.append(replica)\n\n logger.info(\n f'Replica image {replica.tags[0]} created. Cleaning up...')\n self.remove_container(container.name)\n\n for replica in replica_list:\n if replica.attrs.get('Architecture') == LOCAL_ARCHITECTURE:\n local_arch_replica = replica\n local_arch_replica.tag(\n repository=new_replica_name, tag='latest')\n\n # this is done due to how recomitting existing image is not reflected in 'replica_list' var\n actual_replica_list = self.client.images.list(new_replica_name)\n\n return actual_replica_list", "def index(self):\n\n if self.cluster:\n self.cluster.index()\n else:\n super().index()", "def test_list_cluster_policy(self):\n pass", "def modify_rds_security_group(payload):\n # version = payload.get(\"version\")\n rds_ids = payload.pop(\"resource_id\")\n sg_ids = payload.pop(\"sg_id\")\n apply_action = \"GrantSecurityGroup\"\n remove_action = \"RemoveSecurityGroup\"\n check_instance_security_action = \"DescribeSecurityGroupByInstance\"\n version = payload.get(\"version\")\n result_data = {}\n\n succ, resp = get_ha_rds_backend_instance_info(payload)\n if not succ:\n return resp\n rds_2_instance = {rds: instance for instance, rds in resp.items()}\n\n if len(sg_ids) > 1:\n return console_response(\n SecurityErrorCode.ONE_SECURITY_PER_INSTANCE_ERROR, \"modify failed\")\n sg_id = sg_ids[0]\n\n code = 0\n msg = 'Success'\n for rds_id in rds_ids:\n sg_results_succ = []\n sg = RdsSecurityGroupModel.get_security_by_id(sg_id=sg_id)\n sg_uuid = sg.uuid\n visible_rds_record = RdsModel.get_rds_by_id(rds_id=rds_id)\n if visible_rds_record.rds_type == 'ha':\n rds_group = visible_rds_record.rds_group\n rds_records = RdsModel.get_rds_records_by_group(rds_group)\n else:\n rds_records = []\n for rds_record in rds_records:\n rds_ins_uuid = rds_2_instance.get(rds_record.uuid)\n\n payload.update(\n {\"action\": check_instance_security_action, \"version\": version,\n \"server\": rds_ins_uuid})\n # check_resp = api.get(payload=payload, timeout=10)\n check_resp = api.get(payload=payload)\n if check_resp.get(\"code\") != 0:\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = check_resp.get(\"msg\")\n continue\n\n # if the instance already has a security group, remove it\n if check_resp[\"data\"][\"total_count\"] > 0:\n old_sg_uuid = check_resp[\"data\"][\"ret_set\"][0][\"id\"]\n payload.update({\"action\": remove_action, \"version\": version,\n \"server\": rds_ins_uuid,\n \"security_group\": old_sg_uuid})\n # remove_resp = api.get(payload=payload, timeout=10)\n remove_resp = api.get(payload=payload)\n if remove_resp.get(\"code\") != 0:\n logger.debug(\"the resp of removing the old securty group is:\" +\n str(remove_resp))\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = remove_resp.get(\"msg\")\n continue\n else:\n rds_record.sg = None\n rds_record.save()\n\n # grant the new security group to the instance\n payload.update({\"action\": apply_action, \"version\": version,\n \"server\": rds_ins_uuid, \"security_group\": sg_uuid})\n # grant_resp = api.get(payload=payload, timeout=10)\n grant_resp = api.get(payload=payload)\n\n if grant_resp.get(\"code\") != 0:\n logger.debug(\"the resp of granting the new securty group is:\" +\n str(grant_resp))\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = grant_resp.get(\"msg\")\n logger.error(\n \"security_group with sg_id \" + sg_id +\n \" cannot apply to rds with rds_id \" + rds_id)\n else:\n try:\n rds_record.sg = RdsSecurityGroupModel.\\\n get_security_by_id(sg_id)\n 
rds_record.save()\n except Exception as exp:\n logger.error(\"cannot save grant sg to rds to db, {}\".\n format(exp.message))\n else:\n sg_results_succ.append(sg_id)\n result_data.update({rds_id: sg_results_succ})\n resp = console_response(code, msg, len(result_data.keys()), [result_data])\n return resp", "def run(self, image, replicas, scale_replicas, command=None,\n status_wait=True):\n namespace = self.choose_namespace()\n\n name = self.client.create_statefulset(\n namespace=namespace,\n replicas=replicas,\n image=image,\n command=command,\n status_wait=status_wait\n )\n\n self.client.scale_statefulset(\n name,\n namespace=namespace,\n replicas=scale_replicas,\n status_wait=status_wait\n )\n\n self.client.scale_statefulset(\n name,\n namespace=namespace,\n replicas=replicas,\n status_wait=status_wait\n )\n\n self.client.delete_statefulset(\n name=name,\n namespace=namespace,\n status_wait=status_wait\n )", "def test_patch_hyperflex_cluster_profile(self):\n pass", "def get_replica_ips(self):\n return self.membership", "def cluster_run(self, cmd):\n instances = self.service.get_instances()\n responses = []\n for instance in instances:\n success, output = self.run_remote_script(cmd, instance=instance)\n responses.append((success, output))\n return responses", "def _standby_clone():\n # manualy:\n # $ mkdir -p /var/lib/postgresql/9.1/testscluster/\n # $ rsync -avz --rsh='ssh -p2222' root@12.34.56.789:/var/lib/postgresql/9.1/testscluster/ /var/lib/postgresql/9.1/testscluster/\n\n with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):\n puts(green('Start cloning the master'))\n repmgr_clone_command = 'repmgr -D %(slave_pgdata_path)s -d %(sync_db)s -p %(cluster_port)s -U %(sync_user)s -R postgres --verbose standby clone %(pgmaster_ip)s' % env\n puts(green(repmgr_clone_command))\n puts(\"-\" * 40)\n res = sudo(repmgr_clone_command, user='postgres')\n if 'Can not connect to the remote host' in res or 'Connection to database failed' in res:\n puts(\"-\" * 40)\n puts(green(repmgr_clone_command))\n puts(\"-\" * 40)\n puts(\"Master server is %s reachable.\" % red(\"NOT\"))\n puts(\"%s you can try to CLONE the slave manually [%s]:\" % (green(\"BUT\"), red(\"at your own risk\")))\n puts(\"On the slave server:\")\n puts(\"$ sudo -u postgres rsync -avz --rsh='ssh -p%(master_ssh_port)s' postgres@%(pgmaster_ip)s:%(master_pgdata_path)s %(slave_pgdata_path)s --exclude=pg_xlog* --exclude=pg_control --exclude=*.pid\" % env)\n puts(\"Here:\")\n puts(\"$ fab <cluster_task_name> finish_configuring_slave\")\n abort(\"STOP...\")", "def share_replica_delete(context, share_replica_id, session=None,\n need_to_update_usages=True):\n session = session or get_session()\n\n share_instance_delete(context, share_replica_id, session=session,\n need_to_update_usages=need_to_update_usages)", "def test_replace_cluster_role_binding(self):\n pass", "def test_list_cluster_role(self):\n pass", "def copy(self):\n new_client = self._client.copy()\n return self.__class__(self.instance_id, new_client,\n self._cluster_location_id,\n display_name=self.display_name)", "def resource_type(self):\n return 'cluster'", "def test_unassign_configuration_from_instances(self):\n instance_info.dbaas.instances.update(configuration_instance.id,\n remove_configuration=True)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)\n\n instance_info.dbaas.instances.update(instance_info.id,\n remove_configuration=True)\n resp, body = instance_info.dbaas.client.last_response\n 
assert_equal(resp.status, 202)\n instance_info.dbaas.instances.get(instance_info.id)\n\n def result_has_no_configuration():\n instance = instance_info.dbaas.instances.get(inst_info.id)\n if hasattr(instance, 'configuration'):\n return False\n else:\n return True\n\n inst_info = instance_info\n poll_until(result_has_no_configuration)\n inst_info = configuration_instance\n poll_until(result_has_no_configuration)\n\n instance = instance_info.dbaas.instances.get(instance_info.id)\n assert_equal('RESTART_REQUIRED', instance.status)", "def test_patch_hyperflex_cluster_network_policy(self):\n pass", "def test_snat_pod_restart(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2, client3, client4)\n assert self.restart_pod(client1[0])\n assert self.restart_pod(client2[0])\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def initialize_replicas(self):\n try:\n self.replicas+1\n except:\n print \"Ensemble MD Toolkit Error: Number of replicas must be \\\n defined for pattern ReplicaExchange!\"\n raise\n\n\n replicas = []\n N = self.replicas\n for k in range(N):\n r = ReplicaP(k)\n replicas.append(r)\n\n return replicas", "def preflight(self, connection):\n return True", "def test_no_sync_correctness(self):\n self.run_subtests(\n {\n \"sharding_strategy\": [\n ShardingStrategy.FULL_SHARD,\n ShardingStrategy.SHARD_GRAD_OP,\n ShardingStrategy.NO_SHARD,\n ],\n },\n self._test_no_sync_correctness,\n )", "def run_instance():\n ami_id = \"ami-04876f29fd3a5e8ba\" # AMI Id\n instance_type = \"t2.micro\" # Instance Type\n tag_specs = [\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': 'Name',\n 'Value': 'BoB10@ProductDev.sdk.8084'\n }\n ]\n }\n ]\n\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Run an instance\n\n instances = ec2_resource.create_instances(ImageId=ami_id, InstanceType=instance_type,\n MaxCount=1, MinCount=1, KeyName='BoB10@ProductDev.8084',\n TagSpecifications=tag_specs, \n SecurityGroupIds=['sg-06b757b4bb272d98f'],\n UserData=assemble_userdata().as_string())\n Instance_id = instances[0].id\n print('\\nInstance Id: ' + Instance_id)\n print('Image Id: ' + instances[0].image_id)\n print('Instance Type: ' + instances[0].instance_type)\n print('State: ' + instances[0].state['Name'])\n return Instance_id", "def cluster_reboot(cluster):\n map(reboot, cluster)", "def test_update_instance_limit1(self):\n pass", "def failover_host(self, context, volumes, secondary_id=None, groups=None):\n volume_updates = []\n back_end_ip = None\n svc_host = volume_utils.extract_host(self.host, 'backend')\n service = objects.Service.get_by_args(context, svc_host,\n 'cinder-volume')\n\n if secondary_id and secondary_id != self.replica.backend_id:\n LOG.error(\"Kaminario driver received failover_host \"\n \"request, But backend is non replicated device\")\n raise exception.UnableToFailOver(reason=_(\"Failover requested \"\n \"on non replicated \"\n \"backend.\"))\n\n if (service.active_backend_id and\n service.active_backend_id != self.configuration.san_ip):\n self.snap_updates = []\n rep_volumes = []\n # update status for non-replicated primary volumes\n for v in volumes:\n vol_name = self.get_volume_name(v['id'])\n vol = self.client.search(\"volumes\", name=vol_name)\n if v.replication_status != 
K2_REP_FAILED_OVER and vol.total:\n status = 'available'\n if v.volume_attachment:\n map_rs = self.client.search(\"mappings\",\n volume=vol.hits[0])\n status = 'in-use'\n if map_rs.total:\n map_rs.hits[0].delete()\n volume_updates.append({'volume_id': v['id'],\n 'updates':\n {'status': status}})\n else:\n rep_volumes.append(v)\n\n # In-sync from secondaray array to primary array\n for v in rep_volumes:\n vol_name = self.get_volume_name(v['id'])\n vol = self.client.search(\"volumes\", name=vol_name)\n rvol_name = self.get_rep_name(vol_name)\n rvol = self.target.search(\"volumes\", name=rvol_name)\n session_name = self.get_session_name(v['id'])\n rsession_name = self.get_rep_name(session_name)\n ssn = self.target.search(\"replication/sessions\",\n name=rsession_name)\n if ssn.total:\n tgt_ssn = ssn.hits[0]\n ssn = self.client.search(\"replication/sessions\",\n name=session_name)\n if ssn.total:\n src_ssn = ssn.hits[0]\n\n if (tgt_ssn.state == 'failed_over' and\n tgt_ssn.current_role == 'target' and vol.total and src_ssn):\n map_rs = self.client.search(\"mappings\", volume=vol.hits[0])\n if map_rs.total:\n map_rs.hits[0].delete()\n tgt_ssn.state = 'in_sync'\n tgt_ssn.save()\n self._check_for_status(src_ssn, 'in_sync')\n if (rvol.total and src_ssn.state == 'in_sync' and\n src_ssn.current_role == 'target'):\n gen_no = self._create_volume_replica_user_snap(self.target,\n tgt_ssn)\n self.snap_updates.append({'tgt_ssn': tgt_ssn,\n 'gno': gen_no,\n 'stime': time.time()})\n LOG.debug(\"The target session: %s state is \"\n \"changed to in sync\", rsession_name)\n\n self._is_user_snap_sync_finished()\n\n # Delete secondary volume mappings and create snapshot\n for v in rep_volumes:\n vol_name = self.get_volume_name(v['id'])\n vol = self.client.search(\"volumes\", name=vol_name)\n rvol_name = self.get_rep_name(vol_name)\n rvol = self.target.search(\"volumes\", name=rvol_name)\n session_name = self.get_session_name(v['id'])\n rsession_name = self.get_rep_name(session_name)\n ssn = self.target.search(\"replication/sessions\",\n name=rsession_name)\n if ssn.total:\n tgt_ssn = ssn.hits[0]\n ssn = self.client.search(\"replication/sessions\",\n name=session_name)\n if ssn.total:\n src_ssn = ssn.hits[0]\n if (rvol.total and src_ssn.state == 'in_sync' and\n src_ssn.current_role == 'target'):\n map_rs = self.target.search(\"mappings\",\n volume=rvol.hits[0])\n if map_rs.total:\n map_rs.hits[0].delete()\n gen_no = self._create_volume_replica_user_snap(self.target,\n tgt_ssn)\n self.snap_updates.append({'tgt_ssn': tgt_ssn,\n 'gno': gen_no,\n 'stime': time.time()})\n self._is_user_snap_sync_finished()\n # changing source sessions to failed-over\n for v in rep_volumes:\n vol_name = self.get_volume_name(v['id'])\n vol = self.client.search(\"volumes\", name=vol_name)\n rvol_name = self.get_rep_name(vol_name)\n rvol = self.target.search(\"volumes\", name=rvol_name)\n session_name = self.get_session_name(v['id'])\n rsession_name = self.get_rep_name(session_name)\n ssn = self.target.search(\"replication/sessions\",\n name=rsession_name)\n if ssn.total:\n tgt_ssn = ssn.hits[0]\n ssn = self.client.search(\"replication/sessions\",\n name=session_name)\n if ssn.total:\n src_ssn = ssn.hits[0]\n if (rvol.total and src_ssn.state == 'in_sync' and\n src_ssn.current_role == 'target'):\n src_ssn.state = 'failed_over'\n src_ssn.save()\n self._check_for_status(tgt_ssn, 'suspended')\n LOG.debug(\"The target session: %s state is \"\n \"changed to failed over\", session_name)\n\n src_ssn.state = 'in_sync'\n src_ssn.save()\n 
LOG.debug(\"The target session: %s state is \"\n \"changed to in sync\", session_name)\n rep_status = fields.ReplicationStatus.DISABLED\n volume_updates.append({'volume_id': v['id'],\n 'updates':\n {'replication_status': rep_status}})\n\n back_end_ip = self.configuration.san_ip\n else:\n \"\"\"Failover to replication target.\"\"\"\n for v in volumes:\n vol_name = self.get_volume_name(v['id'])\n rv = self.get_rep_name(vol_name)\n if self.target.search(\"volumes\", name=rv).total:\n self._failover_volume(v)\n volume_updates.append(\n {'volume_id': v['id'],\n 'updates':\n {'replication_status': K2_REP_FAILED_OVER}})\n else:\n volume_updates.append({'volume_id': v['id'],\n 'updates': {'status': 'error', }})\n back_end_ip = self.replica.backend_id\n return back_end_ip, volume_updates, []", "def test_crud_autoscaler(self):\n # create the parent cluster\n cluster_id = self._create_cluster()\n\n # create cluster autoscaler\n response = self._create_autoscaler(cluster_id)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)\n\n # list existing objects\n autoscaler_id = self._list_autoscalers(cluster_id)\n\n # update autoscaler\n response = self._update_autoscaler(cluster_id, autoscaler_id)\n self.assertDictContainsSubset(self.AUTOSCALER_UPDATE_DATA, response)\n\n # check it exists\n autoscaler_id = self._check_autoscaler_exists(cluster_id, autoscaler_id)\n\n # delete the object\n response = self._delete_autoscaler(cluster_id, autoscaler_id)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n # check it no longer exists\n self._check_no_autoscalers_exist(cluster_id)", "def replica_configuration(self) -> 'outputs.ReplicaConfigurationResponse':\n return pulumi.get(self, \"replica_configuration\")", "def failover_replicate(self, req, id, body):\n LOG.info(_LI(\"Failover volume's replicate, volume_id: %s\"), id)\n context = req.environ['sgservice.context']\n params = body.get('failover_replicate', {})\n\n checkpoint_id = params.get('checkpoint_id', None)\n force = params.get('force', False)\n volume = self.service_api.get(context, id)\n replicate_info = self.service_api.failover_replicate(\n context, volume, checkpoint_id, force)\n return self._view_builder.action_summary(req, replicate_info)", "def enable_instance_modification(self):\n self._request({\"enable-instance-modification\": True})", "def test_create_cluster_resource_quota(self):\n pass", "def test_delete_cluster_policy(self):\n pass", "def test_01_dedicated_cluster_allocation(self):\n\n # Step 1\n dedicateCmd = dedicateCluster.dedicateClusterCmd()\n dedicateCmd.clusterid = self.clusters[0].id\n dedicateCmd.domainid = self.domain.id\n dedicateCmd.account = self.account_1.name\n self.apiclient.dedicateCluster(dedicateCmd)\n\n afcmd = listAffinityGroups.listAffinityGroupsCmd()\n afcmd.account = self.account_1.name\n afcmd.domainid = self.account_1.domainid\n affinitygr_list = self.apiclient.listAffinityGroups(afcmd)\n\n # Step 2\n self.vm = VirtualMachine.create(\n self.userapiclient_1,\n self.testdata[\"small\"],\n templateid=self.template.id,\n accountid=self.account_1.name,\n domainid=self.account_1.domainid,\n serviceofferingid=self.service_offering.id,\n affinitygroupids=[affinitygr_list[0].id],\n zoneid=self.zone.id,\n mode=self.zone.networktype\n )\n # Steps to verify if VM is created on dedicated account\n vmlist = list_virtual_machines(self.apiclient,\n id=self.vm.id)\n\n hostlist = list_hosts(self.apiclient,\n id=vmlist[0].hostid)\n\n self.assertEqual(hostlist[0].clusterid,\n 
self.clusters[0].id,\n \"check if vm gets deployed on dedicated clusture\"\n )\n # Step 3\n self.vm_1 = VirtualMachine.create(\n self.userapiclient_2,\n self.testdata[\"small\"],\n templateid=self.template.id,\n accountid=self.account_2.name,\n domainid=self.account_2.domainid,\n serviceofferingid=self.service_offering.id,\n zoneid=self.zone.id,\n mode=self.zone.networktype\n )\n\n # Steps to verify if VM is created on dedicated account\n vmlist_1 = list_virtual_machines(self.apiclient,\n id=self.vm_1.id)\n\n hostlist_1 = list_hosts(self.apiclient,\n id=vmlist_1[0].hostid)\n\n self.assertNotEqual(hostlist_1[0].clusterid,\n self.clusters[0].id,\n \"check if vm gets deployed on correct clusture\"\n )\n\n # Step 4\n routerList = list_routers(self.apiclient,\n clusterid=self.clusters[0].id,\n networkid=self.vm_1.nic[0].networkid\n )\n self.assertEqual(\n routerList,\n None,\n \"Check Dedicated cluster is used for virtual routers \\\n that belong to non-dedicated account\")\n\n return", "def test_snat_with_kubelet_restart_on_slave(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"kubelet\",\n host_ips = self.inputs.k8s_slave_ips)\n time.sleep(30) # Wait timer for all kubernetes pods to stablise.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)" ]
[ "0.6234873", "0.6035954", "0.56939065", "0.56453234", "0.5645263", "0.54759395", "0.54519767", "0.5399038", "0.53523755", "0.53196", "0.529082", "0.5268986", "0.52373624", "0.51523685", "0.5136066", "0.5091523", "0.5065374", "0.50623316", "0.50536805", "0.50391704", "0.503182", "0.5031589", "0.5004196", "0.5000984", "0.49986354", "0.49963677", "0.4987731", "0.49693286", "0.4949526", "0.49434823", "0.49400106", "0.4903019", "0.48964983", "0.4886887", "0.48857287", "0.48822886", "0.48766133", "0.48703787", "0.4846582", "0.48440766", "0.48377573", "0.48376986", "0.48318493", "0.48215654", "0.48213914", "0.48190117", "0.48135677", "0.48124337", "0.48063397", "0.48028287", "0.47985765", "0.4789567", "0.47890207", "0.4767835", "0.47664934", "0.47646177", "0.47327355", "0.47283173", "0.47198138", "0.4709496", "0.4697271", "0.46952263", "0.46946916", "0.46936414", "0.46935812", "0.46930057", "0.468307", "0.468263", "0.46772087", "0.46759355", "0.4675536", "0.46707314", "0.46652606", "0.4663906", "0.46628463", "0.46589178", "0.46572253", "0.46524668", "0.46483016", "0.46480554", "0.46388623", "0.46350354", "0.4632795", "0.4631195", "0.46306124", "0.46291834", "0.4627013", "0.4622988", "0.4618733", "0.4615836", "0.4606239", "0.46041548", "0.46036756", "0.46027562", "0.45996165", "0.4592503", "0.45851958", "0.4579642", "0.45796013", "0.45775968", "0.45754504" ]
0.0
-1
This operation is applicable to replica set instances and standalone instances, but not to sharded cluster instances.
def describe_replica_set_role( self, request: dds_20151201_models.DescribeReplicaSetRoleRequest, ) -> dds_20151201_models.DescribeReplicaSetRoleResponse: runtime = util_models.RuntimeOptions() return self.describe_replica_set_role_with_options(request, runtime)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def replicaof(self, *args, **kwargs) -> NoReturn:\n raise RedisClusterException(\"REPLICAOF is not supported in cluster mode\")", "def modifyMastersWithMultipleInstances(self):\n # Nothing to do\n pass", "def test_patch_cluster_role(self):\n pass", "def cluster(self):\n assert False", "def replica_set_name(self):\n ...", "def test_redis_increase_replica_count_usual_case():", "def test_replace_cluster_role(self):\n pass", "def is_replica(self):\n if (\n self.replica\n and self.aip\n and not self.deleted\n and not self.sip\n and not self.dip\n ):\n return True\n return False", "def test_patch_hyperflex_cluster(self):\n pass", "def test_patch_cluster_policy(self):\n pass", "def test_replace_cluster_policy(self):\n pass", "def mmo_execute_on_secondary_or_primary(self, mmo_connection, command, replicaset=\"all\", first_available_only=False): # TODO add execution database?\n cluster_command_output = []\n replsets_completed = set()\n all_replsets = set()\n repl_hosts = self.mmo_shard_servers(mmo_connection)\n for doc in repl_hosts:\n hostname, port, shard = doc[\"hostname\"], doc[\"port\"], doc[\"shard\"]\n all_replsets.add(shard)\n auth_dic = self.mmo_get_auth_details_from_connection(mmo_connection)\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"],\n auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"SECONDARY\" \\\n and (replicaset == \"all\" or replicaset == shard) \\\n and shard not in replsets_completed:\n secondary_found = True\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append(\n {\"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": command_output})\n #if first_available_only:\n replsets_completed.add(shard)\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": shard,\n \"command_output\": {\"Error\": \"This mongod process is not available\"}})\n else:\n raise excep\n # This is the case where there are no secondaries\n for missing_shard in (all_replsets^replsets_completed):\n for doc in repl_hosts:\n hostname, port, shard = doc[\"hostname\"], doc[\"port\"], doc[\"shard\"]\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"],\n auth_dic[\"authentication_database\"])\n if shard == missing_shard and self.mmo_replica_state(c)[\"name\"] == \"PRIMARY\":\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": command_output})\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": shard,\n \"command_output\": {\"Error\": \"This mongod process is not available\"}})\n else:\n raise excep\n return cluster_command_output", "def prepare_replica_for_exchange(self, replica):\n pass", "def test_base_replica_repair_with_contention(self):\n self._base_replica_repair_test(fail_mv_lock=True)", "def dvs_instances_batch_mix_sg(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n self.show_step(2)\n net_1 = 
os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network is created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(3)\n self.show_step(4)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n self.icmp[\"security_group_rule\"][\"security_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"remote_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n self.icmp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n\n for sg in [sg1, sg2]:\n self.tcp[\"security_group_rule\"][\"security_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"remote_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n self.tcp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n\n # Add rules for ssh and ping\n os_conn.goodbye_security()\n _s_groups = os_conn.neutron.list_security_groups()['security_groups']\n _srv_tenant_id = os_conn.get_tenant(SERVTEST_TENANT).id\n default_sg = [sg for sg in _s_groups\n if sg['tenant_id'] == _srv_tenant_id and\n sg['name'] == 'default'][0]\n\n for step in (5, 9):\n self.show_step(step) # step 5, 9\n step += 1\n self.show_step(step) # step 6, 10\n step += 1\n\n openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=vm_count,\n security_groups=[sg1.name, sg2.name, default_sg['name']])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(step) # step 7, 11\n step += 1\n\n instances = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, instances)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair,\n timeout=60 * 5,\n interval=10)\n\n self.show_step(step) # step 8, 12\n step += 1\n\n for instance in instances:\n os_conn.nova.servers.delete(instance)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(instance)", "def crash_safe_replication(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"crash_safe_replication\")", "def test_update_hyperflex_cluster(self):\n pass", "def test_snat_with_master_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n 
self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def scale_app(self, name, replicas):\n raise NotImplementedError", "def mmo_execute_on_secondaries(self, mmo_connection, command, replicaset=\"all\", first_available_only=False): # TODO add execution database?\n cluster_command_output = []\n replsets_completed = []\n\n auth_dic = self.mmo_get_auth_details_from_connection(mmo_connection)\n\n if replicaset == self.config_server_repl_name: # Config server replset\n for doc in self.mmo_config_servers(mmo_connection):\n hostname, port = doc[\"hostname\"], doc[\"port\"]\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"],auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"SECONDARY\":\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": command_output})\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": {\"Error\": \"mongod process is not up\"}})\n else:\n raise excep\n else:\n for doc in self.mmo_shard_servers(mmo_connection):\n hostname, port, shard = doc[\"hostname\"], doc[\"port\"], doc[\"shard\"]\n try:\n\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"], auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"SECONDARY\" \\\n and (replicaset == \"all\" or replicaset == shard)\\\n and shard not in replsets_completed:\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({ \"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": command_output })\n if first_available_only:\n replsets_completed.append(shard)\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": {\"Error\": \"This mongod process is not available\"}})\n else:\n raise excep\n return cluster_command_output", "def test_replace_cluster_resource_quota_status(self):\n pass", "def test_replace_cluster_resource_quota(self):\n pass", "def test_read_cluster_role(self):\n pass", "def test_patch_cluster_resource_quota(self):\n pass", "def replica_catalog(train_tweets_path, val_tweets_path, test_tweets_path, dataset_images, EMBEDDING_BASE_PATH):\n rc = ReplicaCatalog()\n http_location = \"https://workflow.isi.edu/Panorama/Data/CrisisComputing\"\n\n # list of input file objects\n input_images = []\n\n # Adding Images to the replica catalogue\n for image_path in dataset_images:\n name = image_path[1].split(\"/\")[-1]\n image_file = File(name)\n input_images.append(image_file)\n \n path_split = image_path[0].split(\"/\")\n #rc.add_replica(\"local\", image_file, image_path[0])\n rc.add_replica(\"isi\", image_file, os.path.join(http_location, path_split[-2], path_split[-1]))\n\n \n glove_embeddings = File('glove.twitter.27B.200d.txt')\n \n # File objects for train, val and test tweets csv\n train_tweets_name = File(train_tweets_path.split('/')[-1])\n val_tweets_name = File(val_tweets_path.split('/')[-1])\n test_tweets_name = File(test_tweets_path.split('/')[-1])\n \n #rc.add_replica(\"local\", train_tweets_name, train_tweets_path)\n #rc.add_replica(\"local\", val_tweets_name, val_tweets_path)\n 
#rc.add_replica(\"local\", test_tweets_name, test_tweets_path)\n #rc.add_replica(\"local\", glove_embeddings, os.path.join(os.getcwd(), os.path.join(EMBEDDING_BASE_PATH, GLOVE_EMBEDDING_FILE))) \n \n path_split = train_tweets_path.split('/')\n rc.add_replica(\"isi\", train_tweets_name, os.path.join(http_location, path_split[-2], path_split[-1]))\n path_split = val_tweets_path.split('/')\n rc.add_replica(\"isi\", val_tweets_name, os.path.join(http_location, path_split[-2], path_split[-1]))\n path_split = test_tweets_path.split('/')\n rc.add_replica(\"isi\", test_tweets_name, os.path.join(http_location, path_split[-2], path_split[-1]))\n \n rc.add_replica(\"isi\", glove_embeddings, os.path.join(http_location, \"glove_twitter\", GLOVE_EMBEDDING_FILE)) \n rc.write()\n\n return input_images, train_tweets_name, val_tweets_name, test_tweets_name, glove_embeddings", "def mmo_execute_on_primaries(self, mmo_connection, command, replicaset=\"all\"): # TODO add execution database?\n\n auth_dic = self.mmo_get_auth_details_from_connection(mmo_connection)\n\n cluster_command_output = []\n if replicaset == self.config_server_repl_name: # Config server replset\n for doc in self.mmo_config_servers(mmo_connection):\n hostname, port = doc[\"hostname\"], doc[\"port\"]\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"],auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"PRIMARY\":\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": command_output})\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": {\"Error\": \"mongod process is not up\"}})\n else:\n raise excep\n else:\n for doc in self.mmo_shard_servers(mmo_connection):\n hostname, port, shard = doc[\"hostname\"], doc[\"port\"], doc[\"shard\"]\n\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"], auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"PRIMARY\" and (replicaset == \"all\" or replicaset == shard):\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({ \"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": command_output })\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({ \"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": { \"Error\": \"mongod process is not up\" } })\n else:\n raise excep\n return cluster_command_output", "def become_replica(ticket, initiated_by):\n assert model.is_standalone()\n\n # On dev appserver emulate X-Appengine-Inbound-Appid header.\n headers = {'Content-Type': 'application/octet-stream'}\n protocol = 'https'\n if utils.is_local_dev_server():\n headers['X-Appengine-Inbound-Appid'] = app_identity.get_application_id()\n protocol = 'http'\n headers['X-URLFetch-Service-Id'] = utils.get_urlfetch_service_id()\n\n # Pass back the ticket for primary to verify it, tell the primary to use\n # default version hostname to talk to us.\n link_request = replication_pb2.ServiceLinkRequest()\n link_request.ticket = ticket.ticket\n link_request.replica_url = (\n '%s://%s' % (protocol, app_identity.get_default_version_hostname()))\n link_request.initiated_by = initiated_by.to_bytes()\n\n # 
Primary will look at X-Appengine-Inbound-Appid and compare it to what's in\n # the ticket.\n try:\n result = urlfetch.fetch(\n url='%s/auth_service/api/v1/internal/link_replica' % ticket.primary_url,\n payload=link_request.SerializeToString(),\n method='POST',\n headers=headers,\n follow_redirects=False,\n deadline=30,\n validate_certificate=True)\n except urlfetch.Error as exc:\n raise ProtocolError(\n replication_pb2.ServiceLinkResponse.TRANSPORT_ERROR,\n 'URLFetch error (%s): %s' % (exc.__class__.__name__, exc))\n\n # Protobuf based protocol is not using HTTP codes (handler always replies with\n # HTTP 200, providing error details if needed in protobuf serialized body).\n # So any other status code here means there was a transport level error.\n if result.status_code != 200:\n raise ProtocolError(\n replication_pb2.ServiceLinkResponse.TRANSPORT_ERROR,\n 'Request to the primary failed with HTTP %d.' % result.status_code)\n\n link_response = replication_pb2.ServiceLinkResponse.FromString(result.content)\n if link_response.status != replication_pb2.ServiceLinkResponse.SUCCESS:\n message = LINKING_ERRORS.get(\n link_response.status,\n 'Request to the primary failed with status %d.' % link_response.status)\n raise ProtocolError(link_response.status, message)\n\n # Become replica. Auth DB will be overwritten on a first push from Primary.\n state = model.AuthReplicationState(\n key=model.replication_state_key(),\n primary_id=ticket.primary_id,\n primary_url=ticket.primary_url)\n state.put()", "def failover_replica(self) -> 'outputs.InstanceFailoverReplicaResponse':\n return pulumi.get(self, \"failover_replica\")", "def test_patch_cluster_resource_quota_status(self):\n pass", "def enable_replicate(self, req, id, body):\n LOG.info(_LI(\"Enable volume's replicate, volume_id: %s\"), id)\n context = req.environ['sgservice.context']\n volume = self.service_api.get(context, id)\n replicate_info = self.service_api.enable_replicate(context, volume)\n return self._view_builder.action_summary(req, replicate_info)", "def dvs_instances_one_group(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(2)\n self.show_step(3)\n openstack.create_instances(os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n vm_count=vm_count,\n security_groups=[security_group.name])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(4)\n srv_list = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, srv_list)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair)\n\n self.show_step(5)\n for srv in srv_list:\n os_conn.nova.servers.delete(srv)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(srv)", "def test_read_cluster_policy(self):\n pass", "def is_replicated():\n if tf.distribute.has_strategy() and 
tf.distribute.get_replica_context():\n return tf.distribute.get_strategy().num_replicas_in_sync > 1\n return get_tf_replicator() is not None or is_tpu_replicated()", "def test_create_cluster_role(self):\n pass", "def test_index_nas_shares_by_pool(self):\n pass", "def delete_cluster(self):", "def check_smartstack_replication_for_instance(\n instance_config,\n expected_count,\n smartstack_replication_checker,\n):\n\n crit_threshold = instance_config.get_replication_crit_percentage()\n\n log.info('Checking instance %s in smartstack', instance_config.job_id)\n smartstack_replication_info = \\\n smartstack_replication_checker.get_replication_for_instance(instance_config)\n\n log.debug('Got smartstack replication info for %s: %s' %\n (instance_config.job_id, smartstack_replication_info))\n\n if len(smartstack_replication_info) == 0:\n status = pysensu_yelp.Status.CRITICAL\n output = (\n 'Service %s has no Smartstack replication info. Make sure the discover key in your smartstack.yaml '\n 'is valid!\\n'\n ) % instance_config.job_id\n log.error(output)\n else:\n expected_count_per_location = int(expected_count / len(smartstack_replication_info))\n output = ''\n output_critical = ''\n output_ok = ''\n under_replication_per_location = []\n\n for location, available_backends in sorted(smartstack_replication_info.items()):\n num_available_in_location = available_backends.get(instance_config.job_id, 0)\n under_replicated, ratio = is_under_replicated(\n num_available_in_location, expected_count_per_location, crit_threshold,\n )\n if under_replicated:\n output_critical += '- Service %s has %d out of %d expected instances in %s (CRITICAL: %d%%)\\n' % (\n instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,\n )\n else:\n output_ok += '- Service %s has %d out of %d expected instances in %s (OK: %d%%)\\n' % (\n instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,\n )\n under_replication_per_location.append(under_replicated)\n\n output += output_critical\n if output_critical and output_ok:\n output += '\\n\\n'\n output += 'The following locations are OK:\\n'\n output += output_ok\n\n if any(under_replication_per_location):\n status = pysensu_yelp.Status.CRITICAL\n output += (\n \"\\n\\n\"\n \"What this alert means:\\n\"\n \"\\n\"\n \" This replication alert means that a SmartStack powered loadbalancer (haproxy)\\n\"\n \" doesn't have enough healthy backends. Not having enough healthy backends\\n\"\n \" means that clients of that service will get 503s (http) or connection refused\\n\"\n \" (tcp) when trying to connect to it.\\n\"\n \"\\n\"\n \"Reasons this might be happening:\\n\"\n \"\\n\"\n \" The service may simply not have enough copies or it could simply be\\n\"\n \" unhealthy in that location. There also may not be enough resources\\n\"\n \" in the cluster to support the requested instance count.\\n\"\n \"\\n\"\n \"Things you can do:\\n\"\n \"\\n\"\n \" * You can view the logs for the job with:\\n\"\n \" paasta logs -s %(service)s -i %(instance)s -c %(cluster)s\\n\"\n \"\\n\"\n \" * Fix the cause of the unhealthy service. 
Try running:\\n\"\n \"\\n\"\n \" paasta status -s %(service)s -i %(instance)s -c %(cluster)s -vv\\n\"\n \"\\n\"\n \" * Widen SmartStack discovery settings\\n\"\n \" * Increase the instance count\\n\"\n \"\\n\"\n ) % {\n 'service': instance_config.service,\n 'instance': instance_config.instance,\n 'cluster': instance_config.cluster,\n }\n log.error(output)\n else:\n status = pysensu_yelp.Status.OK\n log.info(output)\n send_event(instance_config=instance_config, status=status, output=output)", "def run(self):\n client = self._get_client()\n metadata = self._metadata\n\n if str(metadata.get('cluster')) == str(self._cluster_id):\n if str(self._uuid) == str(self._node_id):\n #hypervisor_hostname = client.servers.find(\n # id=self._uuid).to_dict()['OS-EXT-SRV-ATTR:hypervisor_hostname']\n hypervisor_hostname = client.servers.find(\n id=self._uuid).to_dict()['OS-EXT-SRV-ATTR:host']\n data = {\n 'migrate': True,\n 'uuid': self._uuid,\n 'hypervisor_hostname': hypervisor_hostname,\n 'flavor_id': self._flavor\n }\n return Result(data)\n\n data = {\n 'migrate': False,\n 'uuid': self._uuid,\n 'hypervisor_hostname': '',\n 'flavor_id':self._flavor\n }\n return Result(data)", "def replica(self) -> str:\n return pulumi.get(self, \"replica\")", "def _same_instance(client1, client2):\n return client1._topology_settings.seeds == client2._topology_settings.seeds", "def use_read_replica_if_available(queryset):\r\n return queryset.using(\"read_replica\") if \"read_replica\" in settings.DATABASES else queryset", "def test_delete_collection_cluster_policy(self):\n pass", "def upsert(self):\n\n if self.cluster:\n self.cluster.upsert()\n else:\n super().upsert()", "def test_update_nas_share_by_pool(self):\n pass", "def cluster_replicate(\n self, target_nodes: \"TargetNodesT\", node_id: str\n ) -> ResponseT:\n return self.execute_command(\n \"CLUSTER REPLICATE\", node_id, target_nodes=target_nodes\n )", "def test_snat_with_nodes_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n for node in self.inputs.k8s_slave_ips:\n self.inputs.reboot(node)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def test_patch_cluster_role_binding(self):\n pass", "def replication(self):\n return self._replication", "def crash_safe_replication_enabled(self) -> bool:\n return pulumi.get(self, \"crash_safe_replication_enabled\")", "def test_patch_cluster_network(self):\n pass", "def test_rollback(self):\n os.system('rm config.txt; touch config.txt')\n test_oplog, primary_conn, mongos, solr = self.get_new_oplog()\n\n if not start_cluster():\n self.fail('Cluster could not be started successfully!')\n\n solr = DocManager()\n test_oplog.doc_manager = solr\n solr._delete() # equivalent to solr.delete(q='*: *')\n\n mongos['test']['test'].remove({})\n mongos['test']['test'].insert( \n {'_id': ObjectId('4ff74db3f646462b38000001'),\n 'name': 'paulie'},\n safe=True\n )\n while (mongos['test']['test'].find().count() != 1):\n time.sleep(1)\n cutoff_ts = test_oplog.get_last_oplog_timestamp()\n\n first_doc = {'name': 'paulie', '_ts': bson_ts_to_long(cutoff_ts),\n 'ns': 'test.test',\n '_id': ObjectId('4ff74db3f646462b38000001')}\n\n #try kill one, try restarting\n kill_mongo_proc(primary_conn.host, PORTS_ONE['PRIMARY'])\n\n new_primary_conn 
= Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))\n admin = new_primary_conn['admin']\n while admin.command(\"isMaster\")['ismaster'] is False:\n time.sleep(1)\n time.sleep(5)\n count = 0\n while True:\n try:\n mongos['test']['test'].insert({\n '_id': ObjectId('4ff74db3f646462b38000002'),\n 'name': 'paul'}, \n safe=True)\n break\n except OperationFailure:\n count += 1\n if count > 60:\n self.fail('Call to insert doc failed too many times')\n time.sleep(1)\n continue\n while (mongos['test']['test'].find().count() != 2):\n time.sleep(1)\n kill_mongo_proc(primary_conn.host, PORTS_ONE['SECONDARY'])\n start_mongo_proc(PORTS_ONE['PRIMARY'], \"demo-repl\", \"/replset1a\",\n \"/replset1a.log\", None)\n\n #wait for master to be established\n while primary_conn['admin'].command(\"isMaster\")['ismaster'] is False:\n time.sleep(1)\n\n start_mongo_proc(PORTS_ONE['SECONDARY'], \"demo-repl\", \"/replset1b\",\n \"/replset1b.log\", None)\n\n #wait for secondary to be established\n admin = new_primary_conn['admin']\n while admin.command(\"replSetGetStatus\")['myState'] != 2:\n time.sleep(1)\n while retry_until_ok(mongos['test']['test'].find().count) != 1:\n time.sleep(1)\n\n self.assertEqual(str(new_primary_conn.port), PORTS_ONE['SECONDARY'])\n self.assertEqual(str(primary_conn.port), PORTS_ONE['PRIMARY'])\n\n last_ts = test_oplog.get_last_oplog_timestamp()\n second_doc = {'name': 'paul', '_ts': bson_ts_to_long(last_ts),\n 'ns': 'test.test', \n '_id': ObjectId('4ff74db3f646462b38000002')}\n\n test_oplog.doc_manager.upsert(first_doc)\n test_oplog.doc_manager.upsert(second_doc)\n\n test_oplog.rollback()\n test_oplog.doc_manager.commit()\n results = solr._search()\n\n assert(len(results) == 1)\n\n self.assertEqual(results[0]['name'], 'paulie')\n self.assertTrue(results[0]['_ts'] <= bson_ts_to_long(cutoff_ts))\n\n #test_oplog.join()", "def replicas(self, replicas):\n\n self._replicas = replicas", "def disable_replicate(self, req, id, body):\n LOG.info(_LI(\"Disable volume's replicate, volume_id: %s\"), id)\n context = req.environ['sgservice.context']\n volume = self.service_api.get(context, id)\n replicate_info = self.service_api.disable_replicate(context, volume)\n return self._view_builder.action_summary(req, replicate_info)", "def check_replica_primary(con,host, warning, critical,perf_data):\n if warning is None and critical is None:\n warning=1\n warning=warning or 2\n critical=critical or 2\n\n primary_status=0\n message=\"Primary server has not changed\"\n db=con[\"nagios\"]\n data=get_server_status(con)\n current_primary=data['repl'].get('primary')\n saved_primary=get_stored_primary_server_name(db)\n if current_primary is None:\n current_primary = \"None\"\n if saved_primary is None:\n saved_primary = \"None\"\n if current_primary != saved_primary:\n last_primary_server_record = {\"server\": current_primary}\n db.last_primary_server.update({\"_id\": \"last_primary\"}, {\"$set\" : last_primary_server_record} , upsert=True, safe=True)\n message = \"Primary server has changed from %s to %s\" % (saved_primary, current_primary)\n primary_status=1\n return check_levels(primary_status,warning,critical,message)", "def start_wsrep_new_cluster(self):\r\n return execute(self._start_wsrep_new_cluster)", "def get_instance_OLD (self):\n instances = self.data['instances']\n if not len(instances) == 1:\n raise Exception, \"ArchivalObject: %d Instances found\" % len(instances)\n return instances[0]", "def test_assign_configuration_to_valid_instance(self):\n print(\"instance_info.id: %s\" % instance_info.id)\n 
print(\"configuration_info: %s\" % configuration_info)\n print(\"configuration_info.id: %s\" % configuration_info.id)\n config_id = configuration_info.id\n instance_info.dbaas.instances.modify(instance_info.id,\n configuration=config_id)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)", "def test_snat_with_docker_restart_on_master(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"docker\",\n host_ips = [self.inputs.k8s_master_ip])\n time.sleep(30)\n cluster_status, error_nodes = ContrailStatusChecker(self.inputs).wait_till_contrail_cluster_stable()\n assert cluster_status, 'All nodes and services not up. Failure nodes are: %s' % (\n error_nodes)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def test_replace_cluster_network(self):\n pass", "def failover_replica(self) -> pulumi.Output['outputs.InstanceFailoverReplicaResponse']:\n return pulumi.get(self, \"failover_replica\")", "def test_create_cluster_policy(self):\n pass", "def test_patch_hyperflex_cluster_storage_policy(self):\n pass", "def set_cluster_autoscaler(enabled, worker_pool_names=None, new_worker_pool_names=None):\n modified_pools = []\n if k8s.exists('configmap', 'kube-system', 'iks-ca-configmap'):\n config_map = k8s.get('configmap', 'kube-system', 'iks-ca-configmap')\n worker_pools_config = json.loads(config_map['data']['workerPoolsConfig.json'])\n rename_worker_pools = new_worker_pool_names and worker_pool_names and len(new_worker_pool_names) == len(worker_pool_names)\n for pool_config in worker_pools_config:\n if not worker_pool_names or pool_config['name'] in worker_pool_names:\n if rename_worker_pools:\n pool_config['name'] = new_worker_pool_names[worker_pool_names.index(pool_config['name'])]\n pool_config['enabled'] = enabled\n modified_pools.append(pool_config['name'])\n elif pool_config['enabled'] != enabled:\n pool_config['enabled'] = enabled\n modified_pools.append(pool_config['name'])\n if modified_pools:\n config_map['data']['workerPoolsConfig.json'] = json.dumps(worker_pools_config, ensure_ascii=False) # TODO: Remove ensure_ascii when migration to py3 is complete\n k8s.apply(config_map)\n else:\n logger.info('Cluster autoscaler is not present')\n return modified_pools", "def install_mongo ( vpc_conn, ec2_conn, cloudwatch_conn, vpc, base_name, aws_account_type, base_topicarn, params ):\n app_type = 'MONGO'\n mongo_keypair = get_keypair_name( aws_account_type, vpc.region.name, app_type )\n print mongo_keypair\n mongo_sec_grp_name = get_secgrp_name( base_name, app_type )\n \n print \"mongo_sec_grp_name\" + mongo_sec_grp_name\n \n mongo_sec_grp = find_secgrp(ec2_conn, mongo_sec_grp_name)\n print mongo_sec_grp\n \n if not mongo_sec_grp :\n mongo_sec_grp = create_secgrp( ec2_conn,\n vpc,\n mongo_sec_grp_name,\n 'Controls access to the ' + app_type )\n print mongo_sec_grp \n \n remove_all_rules( ec2_conn, [ mongo_sec_grp ],deep = True,base_name = base_name)\n grant_ssh_access( ec2_conn, [ mongo_sec_grp ], find_group( ec2_conn, base_name, 'NAT' ) )\n grant_grp_self_access ( ec2_conn, mongo_sec_grp, 0, 65535, protocol = 'tcp' )\n \n mongo_ami_name = base_name + '-' + app_type\n \n mongo_ami = get_ami_by_name( ec2_conn, mongo_ami_name )\n if not mongo_ami :\n mongo_ami = 
get_ami_by_name( ec2_conn, default_mongo_ami )\n\n print \"Creating mongoDB Instances\"\n \n mongo_subnets = get_vpc_subnets( vpc_conn, vpc, 'STATIC' )\n mongo_sec_grps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ mongo_sec_grp_name ] } )\n \n primary_ip = get_primary_node(mongo_subnets)\n mongo_instances = []\n mongo_config = get_mongo_config(mongo_subnets)\n \n user_data = None\n primary_instance_id = None\n primary_subnet = None\n for subnet in mongo_subnets :\n cidr_block = subnet.cidr_block\n \n if cidr_block == primary_ip :\n primary_subnet = subnet\n \n # First launch the secondary instances\n if cidr_block != primary_ip :\n instance_private_ip = get_static_ip(subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo )\n\n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )\n\n user_data = base64.b64encode(mongo_config)\n \n print \"Launching primary mongo instance\"\n print \"primary_subnet \" +primary_subnet.cidr_block\n instance_private_ip = get_static_ip(primary_subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = primary_subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo)\n \n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )", "def replace_with_insufficient_replicas_test(self):\n debug(\"Starting cluster with 3 nodes.\")\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n if DISABLE_VNODES:\n num_tokens = 1\n else:\n # a little hacky but grep_log returns the whole line...\n num_tokens = int(node3.get_conf_option('num_tokens'))\n\n debug(\"testing with num_tokens: {}\".format(num_tokens))\n\n debug(\"Inserting Data...\")\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=2)'])\n\n # stop node to replace\n debug(\"Stopping node to replace.\")\n node3.stop(wait_other_notice=True)\n\n # stop other replica\n debug(\"Stopping node2 (other replica)\")\n node2.stop(wait_other_notice=True)\n\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n cluster.add(node4, False)\n node4.start(replace_address='127.0.0.3', wait_for_binary_proto=False, wait_other_notice=False)\n\n # replace should fail due to insufficient replicas\n node4.watch_log_for(\"Unable to find sufficient sources for streaming range\")\n assert_not_running(node4)", "def 
test_non_additive_instance_creation(self):\n\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import launch_instances, run_block_device_dict, farm_security_group_setup\n\n # launch_instances requires vpc setup as done by firesim/scripts/setup_firesim.py\n from awstools.aws_setup import aws_setup\n aws_setup()\n farm_security_group_setup()\n\n type = 'f1.2xlarge'\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags = {'fsimcluster': 'testcluster'},\n always_expand=False)\n instances.should.have.length_of(1)\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags = {'fsimcluster': 'testcluster'},\n always_expand=False)\n instances.should.have.length_of(1)\n\n # There should be one instance total now, across one reservation\n ec2_client = boto3.client('ec2')\n paginator = ec2_client.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n all_reservations.should.have.length_of(1)\n\n [i for r in all_reservations for i in r['Instances']].should.have.length_of(1)", "def test_delete_cluster_role(self):\n pass", "def test_snat_with_kubelet_restart_on_master(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"kubelet\",\n host_ips = [self.inputs.k8s_master_ip])\n time.sleep(30) # Wait timer for all kubernetes pods to stablise.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def index(self):\n\n if self.cluster:\n self.cluster.index()\n else:\n super().index()", "def _mix_replicas(self):\n logger.debug(\"Mixing replicas (does nothing for MultiStateSampler)...\")\n\n # Reset storage to keep track of swap attempts this iteration.\n self._n_accepted_matrix[:, :] = 0\n self._n_proposed_matrix[:, :] = 0\n\n # Determine fraction of swaps accepted this iteration.\n n_swaps_proposed = self._n_proposed_matrix.sum()\n n_swaps_accepted = self._n_accepted_matrix.sum()\n swap_fraction_accepted = 0.0\n if n_swaps_proposed > 0:\n swap_fraction_accepted = n_swaps_accepted / n_swaps_proposed # Python 3 uses true division for /\n logger.debug(\"Accepted {}/{} attempted swaps ({:.1f}%)\".format(n_swaps_accepted, n_swaps_proposed,\n swap_fraction_accepted * 100.0))\n return self._replica_thermodynamic_states", "def convert_container_to_replica(\n self,\n replica_name: str,\n active_container: docker.models.containers.Container,\n passive_container: docker.models.containers.Container) -> list[docker.models.images.Image]:\n new_replica_name = self.sanitize_replica_name(replica_name)\n replica_list = []\n container_list = [\n active_container, passive_container] if passive_container else [active_container]\n\n logger.info(\n f'Creating new replica image with name {new_replica_name}...')\n\n for container in container_list:\n try:\n self.client.images.remove(new_replica_name, force=True)\n except docker.errors.ImageNotFound:\n 
pass\n\n container_arch = container.name.split('_')[-1]\n\n # commit with arch tag\n replica = container.commit(\n repository=new_replica_name, tag=container_arch)\n replica_list.append(replica)\n\n logger.info(\n f'Replica image {replica.tags[0]} created. Cleaning up...')\n self.remove_container(container.name)\n\n for replica in replica_list:\n if replica.attrs.get('Architecture') == LOCAL_ARCHITECTURE:\n local_arch_replica = replica\n local_arch_replica.tag(\n repository=new_replica_name, tag='latest')\n\n # this is done due to how recomitting existing image is not reflected in 'replica_list' var\n actual_replica_list = self.client.images.list(new_replica_name)\n\n return actual_replica_list", "def test_list_cluster_policy(self):\n pass", "def modify_rds_security_group(payload):\n # version = payload.get(\"version\")\n rds_ids = payload.pop(\"resource_id\")\n sg_ids = payload.pop(\"sg_id\")\n apply_action = \"GrantSecurityGroup\"\n remove_action = \"RemoveSecurityGroup\"\n check_instance_security_action = \"DescribeSecurityGroupByInstance\"\n version = payload.get(\"version\")\n result_data = {}\n\n succ, resp = get_ha_rds_backend_instance_info(payload)\n if not succ:\n return resp\n rds_2_instance = {rds: instance for instance, rds in resp.items()}\n\n if len(sg_ids) > 1:\n return console_response(\n SecurityErrorCode.ONE_SECURITY_PER_INSTANCE_ERROR, \"modify failed\")\n sg_id = sg_ids[0]\n\n code = 0\n msg = 'Success'\n for rds_id in rds_ids:\n sg_results_succ = []\n sg = RdsSecurityGroupModel.get_security_by_id(sg_id=sg_id)\n sg_uuid = sg.uuid\n visible_rds_record = RdsModel.get_rds_by_id(rds_id=rds_id)\n if visible_rds_record.rds_type == 'ha':\n rds_group = visible_rds_record.rds_group\n rds_records = RdsModel.get_rds_records_by_group(rds_group)\n else:\n rds_records = []\n for rds_record in rds_records:\n rds_ins_uuid = rds_2_instance.get(rds_record.uuid)\n\n payload.update(\n {\"action\": check_instance_security_action, \"version\": version,\n \"server\": rds_ins_uuid})\n # check_resp = api.get(payload=payload, timeout=10)\n check_resp = api.get(payload=payload)\n if check_resp.get(\"code\") != 0:\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = check_resp.get(\"msg\")\n continue\n\n # if the instance already has a security group, remove it\n if check_resp[\"data\"][\"total_count\"] > 0:\n old_sg_uuid = check_resp[\"data\"][\"ret_set\"][0][\"id\"]\n payload.update({\"action\": remove_action, \"version\": version,\n \"server\": rds_ins_uuid,\n \"security_group\": old_sg_uuid})\n # remove_resp = api.get(payload=payload, timeout=10)\n remove_resp = api.get(payload=payload)\n if remove_resp.get(\"code\") != 0:\n logger.debug(\"the resp of removing the old securty group is:\" +\n str(remove_resp))\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = remove_resp.get(\"msg\")\n continue\n else:\n rds_record.sg = None\n rds_record.save()\n\n # grant the new security group to the instance\n payload.update({\"action\": apply_action, \"version\": version,\n \"server\": rds_ins_uuid, \"security_group\": sg_uuid})\n # grant_resp = api.get(payload=payload, timeout=10)\n grant_resp = api.get(payload=payload)\n\n if grant_resp.get(\"code\") != 0:\n logger.debug(\"the resp of granting the new securty group is:\" +\n str(grant_resp))\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = grant_resp.get(\"msg\")\n logger.error(\n \"security_group with sg_id \" + sg_id +\n \" cannot apply to rds with rds_id \" + rds_id)\n else:\n try:\n rds_record.sg = RdsSecurityGroupModel.\\\n 
get_security_by_id(sg_id)\n rds_record.save()\n except Exception as exp:\n logger.error(\"cannot save grant sg to rds to db, {}\".\n format(exp.message))\n else:\n sg_results_succ.append(sg_id)\n result_data.update({rds_id: sg_results_succ})\n resp = console_response(code, msg, len(result_data.keys()), [result_data])\n return resp", "def run(self, image, replicas, scale_replicas, command=None,\n status_wait=True):\n namespace = self.choose_namespace()\n\n name = self.client.create_statefulset(\n namespace=namespace,\n replicas=replicas,\n image=image,\n command=command,\n status_wait=status_wait\n )\n\n self.client.scale_statefulset(\n name,\n namespace=namespace,\n replicas=scale_replicas,\n status_wait=status_wait\n )\n\n self.client.scale_statefulset(\n name,\n namespace=namespace,\n replicas=replicas,\n status_wait=status_wait\n )\n\n self.client.delete_statefulset(\n name=name,\n namespace=namespace,\n status_wait=status_wait\n )", "def test_patch_hyperflex_cluster_profile(self):\n pass", "def get_replica_ips(self):\n return self.membership", "def cluster_run(self, cmd):\n instances = self.service.get_instances()\n responses = []\n for instance in instances:\n success, output = self.run_remote_script(cmd, instance=instance)\n responses.append((success, output))\n return responses", "def _standby_clone():\n # manualy:\n # $ mkdir -p /var/lib/postgresql/9.1/testscluster/\n # $ rsync -avz --rsh='ssh -p2222' root@12.34.56.789:/var/lib/postgresql/9.1/testscluster/ /var/lib/postgresql/9.1/testscluster/\n\n with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):\n puts(green('Start cloning the master'))\n repmgr_clone_command = 'repmgr -D %(slave_pgdata_path)s -d %(sync_db)s -p %(cluster_port)s -U %(sync_user)s -R postgres --verbose standby clone %(pgmaster_ip)s' % env\n puts(green(repmgr_clone_command))\n puts(\"-\" * 40)\n res = sudo(repmgr_clone_command, user='postgres')\n if 'Can not connect to the remote host' in res or 'Connection to database failed' in res:\n puts(\"-\" * 40)\n puts(green(repmgr_clone_command))\n puts(\"-\" * 40)\n puts(\"Master server is %s reachable.\" % red(\"NOT\"))\n puts(\"%s you can try to CLONE the slave manually [%s]:\" % (green(\"BUT\"), red(\"at your own risk\")))\n puts(\"On the slave server:\")\n puts(\"$ sudo -u postgres rsync -avz --rsh='ssh -p%(master_ssh_port)s' postgres@%(pgmaster_ip)s:%(master_pgdata_path)s %(slave_pgdata_path)s --exclude=pg_xlog* --exclude=pg_control --exclude=*.pid\" % env)\n puts(\"Here:\")\n puts(\"$ fab <cluster_task_name> finish_configuring_slave\")\n abort(\"STOP...\")", "def test_replace_cluster_role_binding(self):\n pass", "def share_replica_delete(context, share_replica_id, session=None,\n need_to_update_usages=True):\n session = session or get_session()\n\n share_instance_delete(context, share_replica_id, session=session,\n need_to_update_usages=need_to_update_usages)", "def test_list_cluster_role(self):\n pass", "def copy(self):\n new_client = self._client.copy()\n return self.__class__(self.instance_id, new_client,\n self._cluster_location_id,\n display_name=self.display_name)", "def resource_type(self):\n return 'cluster'", "def test_patch_hyperflex_cluster_network_policy(self):\n pass", "def test_unassign_configuration_from_instances(self):\n instance_info.dbaas.instances.update(configuration_instance.id,\n remove_configuration=True)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)\n\n instance_info.dbaas.instances.update(instance_info.id,\n 
remove_configuration=True)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)\n instance_info.dbaas.instances.get(instance_info.id)\n\n def result_has_no_configuration():\n instance = instance_info.dbaas.instances.get(inst_info.id)\n if hasattr(instance, 'configuration'):\n return False\n else:\n return True\n\n inst_info = instance_info\n poll_until(result_has_no_configuration)\n inst_info = configuration_instance\n poll_until(result_has_no_configuration)\n\n instance = instance_info.dbaas.instances.get(instance_info.id)\n assert_equal('RESTART_REQUIRED', instance.status)", "def test_snat_pod_restart(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2, client3, client4)\n assert self.restart_pod(client1[0])\n assert self.restart_pod(client2[0])\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def preflight(self, connection):\n return True", "def initialize_replicas(self):\n try:\n self.replicas+1\n except:\n print \"Ensemble MD Toolkit Error: Number of replicas must be \\\n defined for pattern ReplicaExchange!\"\n raise\n\n\n replicas = []\n N = self.replicas\n for k in range(N):\n r = ReplicaP(k)\n replicas.append(r)\n\n return replicas", "def test_no_sync_correctness(self):\n self.run_subtests(\n {\n \"sharding_strategy\": [\n ShardingStrategy.FULL_SHARD,\n ShardingStrategy.SHARD_GRAD_OP,\n ShardingStrategy.NO_SHARD,\n ],\n },\n self._test_no_sync_correctness,\n )", "def run_instance():\n ami_id = \"ami-04876f29fd3a5e8ba\" # AMI Id\n instance_type = \"t2.micro\" # Instance Type\n tag_specs = [\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': 'Name',\n 'Value': 'BoB10@ProductDev.sdk.8084'\n }\n ]\n }\n ]\n\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Run an instance\n\n instances = ec2_resource.create_instances(ImageId=ami_id, InstanceType=instance_type,\n MaxCount=1, MinCount=1, KeyName='BoB10@ProductDev.8084',\n TagSpecifications=tag_specs, \n SecurityGroupIds=['sg-06b757b4bb272d98f'],\n UserData=assemble_userdata().as_string())\n Instance_id = instances[0].id\n print('\\nInstance Id: ' + Instance_id)\n print('Image Id: ' + instances[0].image_id)\n print('Instance Type: ' + instances[0].instance_type)\n print('State: ' + instances[0].state['Name'])\n return Instance_id", "def cluster_reboot(cluster):\n map(reboot, cluster)", "def test_update_instance_limit1(self):\n pass", "def failover_host(self, context, volumes, secondary_id=None, groups=None):\n volume_updates = []\n back_end_ip = None\n svc_host = volume_utils.extract_host(self.host, 'backend')\n service = objects.Service.get_by_args(context, svc_host,\n 'cinder-volume')\n\n if secondary_id and secondary_id != self.replica.backend_id:\n LOG.error(\"Kaminario driver received failover_host \"\n \"request, But backend is non replicated device\")\n raise exception.UnableToFailOver(reason=_(\"Failover requested \"\n \"on non replicated \"\n \"backend.\"))\n\n if (service.active_backend_id and\n service.active_backend_id != self.configuration.san_ip):\n self.snap_updates = []\n rep_volumes = []\n # update status for non-replicated primary volumes\n for v in volumes:\n vol_name = self.get_volume_name(v['id'])\n vol = self.client.search(\"volumes\", name=vol_name)\n if v.replication_status 
!= K2_REP_FAILED_OVER and vol.total:\n status = 'available'\n if v.volume_attachment:\n map_rs = self.client.search(\"mappings\",\n volume=vol.hits[0])\n status = 'in-use'\n if map_rs.total:\n map_rs.hits[0].delete()\n volume_updates.append({'volume_id': v['id'],\n 'updates':\n {'status': status}})\n else:\n rep_volumes.append(v)\n\n # In-sync from secondaray array to primary array\n for v in rep_volumes:\n vol_name = self.get_volume_name(v['id'])\n vol = self.client.search(\"volumes\", name=vol_name)\n rvol_name = self.get_rep_name(vol_name)\n rvol = self.target.search(\"volumes\", name=rvol_name)\n session_name = self.get_session_name(v['id'])\n rsession_name = self.get_rep_name(session_name)\n ssn = self.target.search(\"replication/sessions\",\n name=rsession_name)\n if ssn.total:\n tgt_ssn = ssn.hits[0]\n ssn = self.client.search(\"replication/sessions\",\n name=session_name)\n if ssn.total:\n src_ssn = ssn.hits[0]\n\n if (tgt_ssn.state == 'failed_over' and\n tgt_ssn.current_role == 'target' and vol.total and src_ssn):\n map_rs = self.client.search(\"mappings\", volume=vol.hits[0])\n if map_rs.total:\n map_rs.hits[0].delete()\n tgt_ssn.state = 'in_sync'\n tgt_ssn.save()\n self._check_for_status(src_ssn, 'in_sync')\n if (rvol.total and src_ssn.state == 'in_sync' and\n src_ssn.current_role == 'target'):\n gen_no = self._create_volume_replica_user_snap(self.target,\n tgt_ssn)\n self.snap_updates.append({'tgt_ssn': tgt_ssn,\n 'gno': gen_no,\n 'stime': time.time()})\n LOG.debug(\"The target session: %s state is \"\n \"changed to in sync\", rsession_name)\n\n self._is_user_snap_sync_finished()\n\n # Delete secondary volume mappings and create snapshot\n for v in rep_volumes:\n vol_name = self.get_volume_name(v['id'])\n vol = self.client.search(\"volumes\", name=vol_name)\n rvol_name = self.get_rep_name(vol_name)\n rvol = self.target.search(\"volumes\", name=rvol_name)\n session_name = self.get_session_name(v['id'])\n rsession_name = self.get_rep_name(session_name)\n ssn = self.target.search(\"replication/sessions\",\n name=rsession_name)\n if ssn.total:\n tgt_ssn = ssn.hits[0]\n ssn = self.client.search(\"replication/sessions\",\n name=session_name)\n if ssn.total:\n src_ssn = ssn.hits[0]\n if (rvol.total and src_ssn.state == 'in_sync' and\n src_ssn.current_role == 'target'):\n map_rs = self.target.search(\"mappings\",\n volume=rvol.hits[0])\n if map_rs.total:\n map_rs.hits[0].delete()\n gen_no = self._create_volume_replica_user_snap(self.target,\n tgt_ssn)\n self.snap_updates.append({'tgt_ssn': tgt_ssn,\n 'gno': gen_no,\n 'stime': time.time()})\n self._is_user_snap_sync_finished()\n # changing source sessions to failed-over\n for v in rep_volumes:\n vol_name = self.get_volume_name(v['id'])\n vol = self.client.search(\"volumes\", name=vol_name)\n rvol_name = self.get_rep_name(vol_name)\n rvol = self.target.search(\"volumes\", name=rvol_name)\n session_name = self.get_session_name(v['id'])\n rsession_name = self.get_rep_name(session_name)\n ssn = self.target.search(\"replication/sessions\",\n name=rsession_name)\n if ssn.total:\n tgt_ssn = ssn.hits[0]\n ssn = self.client.search(\"replication/sessions\",\n name=session_name)\n if ssn.total:\n src_ssn = ssn.hits[0]\n if (rvol.total and src_ssn.state == 'in_sync' and\n src_ssn.current_role == 'target'):\n src_ssn.state = 'failed_over'\n src_ssn.save()\n self._check_for_status(tgt_ssn, 'suspended')\n LOG.debug(\"The target session: %s state is \"\n \"changed to failed over\", session_name)\n\n src_ssn.state = 'in_sync'\n src_ssn.save()\n 
LOG.debug(\"The target session: %s state is \"\n \"changed to in sync\", session_name)\n rep_status = fields.ReplicationStatus.DISABLED\n volume_updates.append({'volume_id': v['id'],\n 'updates':\n {'replication_status': rep_status}})\n\n back_end_ip = self.configuration.san_ip\n else:\n \"\"\"Failover to replication target.\"\"\"\n for v in volumes:\n vol_name = self.get_volume_name(v['id'])\n rv = self.get_rep_name(vol_name)\n if self.target.search(\"volumes\", name=rv).total:\n self._failover_volume(v)\n volume_updates.append(\n {'volume_id': v['id'],\n 'updates':\n {'replication_status': K2_REP_FAILED_OVER}})\n else:\n volume_updates.append({'volume_id': v['id'],\n 'updates': {'status': 'error', }})\n back_end_ip = self.replica.backend_id\n return back_end_ip, volume_updates, []", "def test_crud_autoscaler(self):\n # create the parent cluster\n cluster_id = self._create_cluster()\n\n # create cluster autoscaler\n response = self._create_autoscaler(cluster_id)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)\n\n # list existing objects\n autoscaler_id = self._list_autoscalers(cluster_id)\n\n # update autoscaler\n response = self._update_autoscaler(cluster_id, autoscaler_id)\n self.assertDictContainsSubset(self.AUTOSCALER_UPDATE_DATA, response)\n\n # check it exists\n autoscaler_id = self._check_autoscaler_exists(cluster_id, autoscaler_id)\n\n # delete the object\n response = self._delete_autoscaler(cluster_id, autoscaler_id)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n # check it no longer exists\n self._check_no_autoscalers_exist(cluster_id)", "def replica_configuration(self) -> 'outputs.ReplicaConfigurationResponse':\n return pulumi.get(self, \"replica_configuration\")", "def failover_replicate(self, req, id, body):\n LOG.info(_LI(\"Failover volume's replicate, volume_id: %s\"), id)\n context = req.environ['sgservice.context']\n params = body.get('failover_replicate', {})\n\n checkpoint_id = params.get('checkpoint_id', None)\n force = params.get('force', False)\n volume = self.service_api.get(context, id)\n replicate_info = self.service_api.failover_replicate(\n context, volume, checkpoint_id, force)\n return self._view_builder.action_summary(req, replicate_info)", "def enable_instance_modification(self):\n self._request({\"enable-instance-modification\": True})", "def test_create_cluster_resource_quota(self):\n pass", "def test_delete_cluster_policy(self):\n pass", "def test_01_dedicated_cluster_allocation(self):\n\n # Step 1\n dedicateCmd = dedicateCluster.dedicateClusterCmd()\n dedicateCmd.clusterid = self.clusters[0].id\n dedicateCmd.domainid = self.domain.id\n dedicateCmd.account = self.account_1.name\n self.apiclient.dedicateCluster(dedicateCmd)\n\n afcmd = listAffinityGroups.listAffinityGroupsCmd()\n afcmd.account = self.account_1.name\n afcmd.domainid = self.account_1.domainid\n affinitygr_list = self.apiclient.listAffinityGroups(afcmd)\n\n # Step 2\n self.vm = VirtualMachine.create(\n self.userapiclient_1,\n self.testdata[\"small\"],\n templateid=self.template.id,\n accountid=self.account_1.name,\n domainid=self.account_1.domainid,\n serviceofferingid=self.service_offering.id,\n affinitygroupids=[affinitygr_list[0].id],\n zoneid=self.zone.id,\n mode=self.zone.networktype\n )\n # Steps to verify if VM is created on dedicated account\n vmlist = list_virtual_machines(self.apiclient,\n id=self.vm.id)\n\n hostlist = list_hosts(self.apiclient,\n id=vmlist[0].hostid)\n\n self.assertEqual(hostlist[0].clusterid,\n 
self.clusters[0].id,\n \"check if vm gets deployed on dedicated clusture\"\n )\n # Step 3\n self.vm_1 = VirtualMachine.create(\n self.userapiclient_2,\n self.testdata[\"small\"],\n templateid=self.template.id,\n accountid=self.account_2.name,\n domainid=self.account_2.domainid,\n serviceofferingid=self.service_offering.id,\n zoneid=self.zone.id,\n mode=self.zone.networktype\n )\n\n # Steps to verify if VM is created on dedicated account\n vmlist_1 = list_virtual_machines(self.apiclient,\n id=self.vm_1.id)\n\n hostlist_1 = list_hosts(self.apiclient,\n id=vmlist_1[0].hostid)\n\n self.assertNotEqual(hostlist_1[0].clusterid,\n self.clusters[0].id,\n \"check if vm gets deployed on correct clusture\"\n )\n\n # Step 4\n routerList = list_routers(self.apiclient,\n clusterid=self.clusters[0].id,\n networkid=self.vm_1.nic[0].networkid\n )\n self.assertEqual(\n routerList,\n None,\n \"Check Dedicated cluster is used for virtual routers \\\n that belong to non-dedicated account\")\n\n return", "def test_snat_with_kubelet_restart_on_slave(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"kubelet\",\n host_ips = self.inputs.k8s_slave_ips)\n time.sleep(30) # Wait timer for all kubernetes pods to stablise.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)" ]
[ "0.6235598", "0.6034554", "0.5694733", "0.5646017", "0.56440216", "0.54747474", "0.5452212", "0.53970134", "0.53528446", "0.5319955", "0.52906865", "0.5269184", "0.523622", "0.5152003", "0.51320475", "0.50894994", "0.5066048", "0.50578713", "0.50511944", "0.5039058", "0.5033668", "0.50330997", "0.5004842", "0.50030345", "0.49963602", "0.4995974", "0.49865982", "0.49673122", "0.4951828", "0.49394482", "0.4938433", "0.49033666", "0.48949558", "0.4886562", "0.4885587", "0.48833707", "0.48725247", "0.48711684", "0.48439056", "0.4841425", "0.4839226", "0.48380563", "0.48333198", "0.48204106", "0.48185363", "0.48165423", "0.4813661", "0.48123437", "0.4803667", "0.48026344", "0.4796802", "0.47883546", "0.47881228", "0.47649378", "0.47643968", "0.47617927", "0.47302407", "0.4723675", "0.471896", "0.47074193", "0.46965697", "0.46936017", "0.46926036", "0.46913958", "0.46910277", "0.46905157", "0.46831277", "0.46791822", "0.46774694", "0.46753186", "0.46730226", "0.46706218", "0.46637732", "0.46635908", "0.46627632", "0.46559414", "0.46557987", "0.46494275", "0.46485764", "0.46482638", "0.46394807", "0.46356714", "0.46339294", "0.4630341", "0.46280196", "0.46255767", "0.46248707", "0.46246627", "0.46181569", "0.46125743", "0.46041498", "0.460292", "0.46026477", "0.4602283", "0.45976254", "0.45908973", "0.45833734", "0.45805952", "0.45797732", "0.45767343", "0.45719993" ]
0.0
-1
This operation is applicable to replica set instances and standalone instances, but not to sharded cluster instances.
async def describe_replica_set_role_async( self, request: dds_20151201_models.DescribeReplicaSetRoleRequest, ) -> dds_20151201_models.DescribeReplicaSetRoleResponse: runtime = util_models.RuntimeOptions() return await self.describe_replica_set_role_with_options_async(request, runtime)
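A minimal usage sketch for the async method in the record above, assuming the standard Alibaba Cloud Python SDK packaging (alibabacloud_dds20151201 and alibabacloud_tea_openapi). The endpoint, credentials, and instance ID are placeholders, and the dbinstance_id field on DescribeReplicaSetRoleRequest is an assumption inferred from the request fields used elsewhere in this document; this is an illustration, not part of the dataset record.

import asyncio

from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_dds20151201.client import Client as Dds20151201Client
from alibabacloud_tea_openapi import models as open_api_models


async def main() -> None:
    # Placeholder credentials and endpoint; replace with real values.
    config = open_api_models.Config(
        access_key_id='<access-key-id>',
        access_key_secret='<access-key-secret>',
        endpoint='mongodb.aliyuncs.com',
    )
    client = Dds20151201Client(config)
    # Hypothetical replica set or standalone instance ID (not valid for sharded clusters).
    request = dds_20151201_models.DescribeReplicaSetRoleRequest(
        dbinstance_id='dds-bpxxxxxxxxxxxxx',
    )
    response = await client.describe_replica_set_role_async(request)
    print(response.body)


if __name__ == '__main__':
    asyncio.run(main())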
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def replicaof(self, *args, **kwargs) -> NoReturn:\n raise RedisClusterException(\"REPLICAOF is not supported in cluster mode\")", "def modifyMastersWithMultipleInstances(self):\n # Nothing to do\n pass", "def test_patch_cluster_role(self):\n pass", "def cluster(self):\n assert False", "def replica_set_name(self):\n ...", "def test_redis_increase_replica_count_usual_case():", "def test_replace_cluster_role(self):\n pass", "def is_replica(self):\n if (\n self.replica\n and self.aip\n and not self.deleted\n and not self.sip\n and not self.dip\n ):\n return True\n return False", "def test_patch_hyperflex_cluster(self):\n pass", "def test_patch_cluster_policy(self):\n pass", "def test_replace_cluster_policy(self):\n pass", "def mmo_execute_on_secondary_or_primary(self, mmo_connection, command, replicaset=\"all\", first_available_only=False): # TODO add execution database?\n cluster_command_output = []\n replsets_completed = set()\n all_replsets = set()\n repl_hosts = self.mmo_shard_servers(mmo_connection)\n for doc in repl_hosts:\n hostname, port, shard = doc[\"hostname\"], doc[\"port\"], doc[\"shard\"]\n all_replsets.add(shard)\n auth_dic = self.mmo_get_auth_details_from_connection(mmo_connection)\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"],\n auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"SECONDARY\" \\\n and (replicaset == \"all\" or replicaset == shard) \\\n and shard not in replsets_completed:\n secondary_found = True\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append(\n {\"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": command_output})\n #if first_available_only:\n replsets_completed.add(shard)\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": shard,\n \"command_output\": {\"Error\": \"This mongod process is not available\"}})\n else:\n raise excep\n # This is the case where there are no secondaries\n for missing_shard in (all_replsets^replsets_completed):\n for doc in repl_hosts:\n hostname, port, shard = doc[\"hostname\"], doc[\"port\"], doc[\"shard\"]\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"],\n auth_dic[\"authentication_database\"])\n if shard == missing_shard and self.mmo_replica_state(c)[\"name\"] == \"PRIMARY\":\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": command_output})\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": shard,\n \"command_output\": {\"Error\": \"This mongod process is not available\"}})\n else:\n raise excep\n return cluster_command_output", "def prepare_replica_for_exchange(self, replica):\n pass", "def test_base_replica_repair_with_contention(self):\n self._base_replica_repair_test(fail_mv_lock=True)", "def dvs_instances_batch_mix_sg(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n self.show_step(2)\n net_1 = 
os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network is created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(3)\n self.show_step(4)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n self.icmp[\"security_group_rule\"][\"security_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"remote_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n self.icmp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n\n for sg in [sg1, sg2]:\n self.tcp[\"security_group_rule\"][\"security_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"remote_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n self.tcp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n\n # Add rules for ssh and ping\n os_conn.goodbye_security()\n _s_groups = os_conn.neutron.list_security_groups()['security_groups']\n _srv_tenant_id = os_conn.get_tenant(SERVTEST_TENANT).id\n default_sg = [sg for sg in _s_groups\n if sg['tenant_id'] == _srv_tenant_id and\n sg['name'] == 'default'][0]\n\n for step in (5, 9):\n self.show_step(step) # step 5, 9\n step += 1\n self.show_step(step) # step 6, 10\n step += 1\n\n openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=vm_count,\n security_groups=[sg1.name, sg2.name, default_sg['name']])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(step) # step 7, 11\n step += 1\n\n instances = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, instances)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair,\n timeout=60 * 5,\n interval=10)\n\n self.show_step(step) # step 8, 12\n step += 1\n\n for instance in instances:\n os_conn.nova.servers.delete(instance)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(instance)", "def crash_safe_replication(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"crash_safe_replication\")", "def test_update_hyperflex_cluster(self):\n pass", "def test_snat_with_master_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n 
self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def scale_app(self, name, replicas):\n raise NotImplementedError", "def mmo_execute_on_secondaries(self, mmo_connection, command, replicaset=\"all\", first_available_only=False): # TODO add execution database?\n cluster_command_output = []\n replsets_completed = []\n\n auth_dic = self.mmo_get_auth_details_from_connection(mmo_connection)\n\n if replicaset == self.config_server_repl_name: # Config server replset\n for doc in self.mmo_config_servers(mmo_connection):\n hostname, port = doc[\"hostname\"], doc[\"port\"]\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"],auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"SECONDARY\":\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": command_output})\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": {\"Error\": \"mongod process is not up\"}})\n else:\n raise excep\n else:\n for doc in self.mmo_shard_servers(mmo_connection):\n hostname, port, shard = doc[\"hostname\"], doc[\"port\"], doc[\"shard\"]\n try:\n\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"], auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"SECONDARY\" \\\n and (replicaset == \"all\" or replicaset == shard)\\\n and shard not in replsets_completed:\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({ \"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": command_output })\n if first_available_only:\n replsets_completed.append(shard)\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": {\"Error\": \"This mongod process is not available\"}})\n else:\n raise excep\n return cluster_command_output", "def test_replace_cluster_resource_quota_status(self):\n pass", "def test_replace_cluster_resource_quota(self):\n pass", "def test_read_cluster_role(self):\n pass", "def test_patch_cluster_resource_quota(self):\n pass", "def replica_catalog(train_tweets_path, val_tweets_path, test_tweets_path, dataset_images, EMBEDDING_BASE_PATH):\n rc = ReplicaCatalog()\n http_location = \"https://workflow.isi.edu/Panorama/Data/CrisisComputing\"\n\n # list of input file objects\n input_images = []\n\n # Adding Images to the replica catalogue\n for image_path in dataset_images:\n name = image_path[1].split(\"/\")[-1]\n image_file = File(name)\n input_images.append(image_file)\n \n path_split = image_path[0].split(\"/\")\n #rc.add_replica(\"local\", image_file, image_path[0])\n rc.add_replica(\"isi\", image_file, os.path.join(http_location, path_split[-2], path_split[-1]))\n\n \n glove_embeddings = File('glove.twitter.27B.200d.txt')\n \n # File objects for train, val and test tweets csv\n train_tweets_name = File(train_tweets_path.split('/')[-1])\n val_tweets_name = File(val_tweets_path.split('/')[-1])\n test_tweets_name = File(test_tweets_path.split('/')[-1])\n \n #rc.add_replica(\"local\", train_tweets_name, train_tweets_path)\n #rc.add_replica(\"local\", val_tweets_name, val_tweets_path)\n 
#rc.add_replica(\"local\", test_tweets_name, test_tweets_path)\n #rc.add_replica(\"local\", glove_embeddings, os.path.join(os.getcwd(), os.path.join(EMBEDDING_BASE_PATH, GLOVE_EMBEDDING_FILE))) \n \n path_split = train_tweets_path.split('/')\n rc.add_replica(\"isi\", train_tweets_name, os.path.join(http_location, path_split[-2], path_split[-1]))\n path_split = val_tweets_path.split('/')\n rc.add_replica(\"isi\", val_tweets_name, os.path.join(http_location, path_split[-2], path_split[-1]))\n path_split = test_tweets_path.split('/')\n rc.add_replica(\"isi\", test_tweets_name, os.path.join(http_location, path_split[-2], path_split[-1]))\n \n rc.add_replica(\"isi\", glove_embeddings, os.path.join(http_location, \"glove_twitter\", GLOVE_EMBEDDING_FILE)) \n rc.write()\n\n return input_images, train_tweets_name, val_tweets_name, test_tweets_name, glove_embeddings", "def mmo_execute_on_primaries(self, mmo_connection, command, replicaset=\"all\"): # TODO add execution database?\n\n auth_dic = self.mmo_get_auth_details_from_connection(mmo_connection)\n\n cluster_command_output = []\n if replicaset == self.config_server_repl_name: # Config server replset\n for doc in self.mmo_config_servers(mmo_connection):\n hostname, port = doc[\"hostname\"], doc[\"port\"]\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"],auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"PRIMARY\":\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": command_output})\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": {\"Error\": \"mongod process is not up\"}})\n else:\n raise excep\n else:\n for doc in self.mmo_shard_servers(mmo_connection):\n hostname, port, shard = doc[\"hostname\"], doc[\"port\"], doc[\"shard\"]\n\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"], auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"PRIMARY\" and (replicaset == \"all\" or replicaset == shard):\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({ \"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": command_output })\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({ \"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": { \"Error\": \"mongod process is not up\" } })\n else:\n raise excep\n return cluster_command_output", "def become_replica(ticket, initiated_by):\n assert model.is_standalone()\n\n # On dev appserver emulate X-Appengine-Inbound-Appid header.\n headers = {'Content-Type': 'application/octet-stream'}\n protocol = 'https'\n if utils.is_local_dev_server():\n headers['X-Appengine-Inbound-Appid'] = app_identity.get_application_id()\n protocol = 'http'\n headers['X-URLFetch-Service-Id'] = utils.get_urlfetch_service_id()\n\n # Pass back the ticket for primary to verify it, tell the primary to use\n # default version hostname to talk to us.\n link_request = replication_pb2.ServiceLinkRequest()\n link_request.ticket = ticket.ticket\n link_request.replica_url = (\n '%s://%s' % (protocol, app_identity.get_default_version_hostname()))\n link_request.initiated_by = initiated_by.to_bytes()\n\n # 
Primary will look at X-Appengine-Inbound-Appid and compare it to what's in\n # the ticket.\n try:\n result = urlfetch.fetch(\n url='%s/auth_service/api/v1/internal/link_replica' % ticket.primary_url,\n payload=link_request.SerializeToString(),\n method='POST',\n headers=headers,\n follow_redirects=False,\n deadline=30,\n validate_certificate=True)\n except urlfetch.Error as exc:\n raise ProtocolError(\n replication_pb2.ServiceLinkResponse.TRANSPORT_ERROR,\n 'URLFetch error (%s): %s' % (exc.__class__.__name__, exc))\n\n # Protobuf based protocol is not using HTTP codes (handler always replies with\n # HTTP 200, providing error details if needed in protobuf serialized body).\n # So any other status code here means there was a transport level error.\n if result.status_code != 200:\n raise ProtocolError(\n replication_pb2.ServiceLinkResponse.TRANSPORT_ERROR,\n 'Request to the primary failed with HTTP %d.' % result.status_code)\n\n link_response = replication_pb2.ServiceLinkResponse.FromString(result.content)\n if link_response.status != replication_pb2.ServiceLinkResponse.SUCCESS:\n message = LINKING_ERRORS.get(\n link_response.status,\n 'Request to the primary failed with status %d.' % link_response.status)\n raise ProtocolError(link_response.status, message)\n\n # Become replica. Auth DB will be overwritten on a first push from Primary.\n state = model.AuthReplicationState(\n key=model.replication_state_key(),\n primary_id=ticket.primary_id,\n primary_url=ticket.primary_url)\n state.put()", "def failover_replica(self) -> 'outputs.InstanceFailoverReplicaResponse':\n return pulumi.get(self, \"failover_replica\")", "def test_patch_cluster_resource_quota_status(self):\n pass", "def dvs_instances_one_group(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(2)\n self.show_step(3)\n openstack.create_instances(os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n vm_count=vm_count,\n security_groups=[security_group.name])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(4)\n srv_list = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, srv_list)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair)\n\n self.show_step(5)\n for srv in srv_list:\n os_conn.nova.servers.delete(srv)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(srv)", "def enable_replicate(self, req, id, body):\n LOG.info(_LI(\"Enable volume's replicate, volume_id: %s\"), id)\n context = req.environ['sgservice.context']\n volume = self.service_api.get(context, id)\n replicate_info = self.service_api.enable_replicate(context, volume)\n return self._view_builder.action_summary(req, replicate_info)", "def test_read_cluster_policy(self):\n pass", "def is_replicated():\n if tf.distribute.has_strategy() and 
tf.distribute.get_replica_context():\n return tf.distribute.get_strategy().num_replicas_in_sync > 1\n return get_tf_replicator() is not None or is_tpu_replicated()", "def test_create_cluster_role(self):\n pass", "def test_index_nas_shares_by_pool(self):\n pass", "def delete_cluster(self):", "def check_smartstack_replication_for_instance(\n instance_config,\n expected_count,\n smartstack_replication_checker,\n):\n\n crit_threshold = instance_config.get_replication_crit_percentage()\n\n log.info('Checking instance %s in smartstack', instance_config.job_id)\n smartstack_replication_info = \\\n smartstack_replication_checker.get_replication_for_instance(instance_config)\n\n log.debug('Got smartstack replication info for %s: %s' %\n (instance_config.job_id, smartstack_replication_info))\n\n if len(smartstack_replication_info) == 0:\n status = pysensu_yelp.Status.CRITICAL\n output = (\n 'Service %s has no Smartstack replication info. Make sure the discover key in your smartstack.yaml '\n 'is valid!\\n'\n ) % instance_config.job_id\n log.error(output)\n else:\n expected_count_per_location = int(expected_count / len(smartstack_replication_info))\n output = ''\n output_critical = ''\n output_ok = ''\n under_replication_per_location = []\n\n for location, available_backends in sorted(smartstack_replication_info.items()):\n num_available_in_location = available_backends.get(instance_config.job_id, 0)\n under_replicated, ratio = is_under_replicated(\n num_available_in_location, expected_count_per_location, crit_threshold,\n )\n if under_replicated:\n output_critical += '- Service %s has %d out of %d expected instances in %s (CRITICAL: %d%%)\\n' % (\n instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,\n )\n else:\n output_ok += '- Service %s has %d out of %d expected instances in %s (OK: %d%%)\\n' % (\n instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,\n )\n under_replication_per_location.append(under_replicated)\n\n output += output_critical\n if output_critical and output_ok:\n output += '\\n\\n'\n output += 'The following locations are OK:\\n'\n output += output_ok\n\n if any(under_replication_per_location):\n status = pysensu_yelp.Status.CRITICAL\n output += (\n \"\\n\\n\"\n \"What this alert means:\\n\"\n \"\\n\"\n \" This replication alert means that a SmartStack powered loadbalancer (haproxy)\\n\"\n \" doesn't have enough healthy backends. Not having enough healthy backends\\n\"\n \" means that clients of that service will get 503s (http) or connection refused\\n\"\n \" (tcp) when trying to connect to it.\\n\"\n \"\\n\"\n \"Reasons this might be happening:\\n\"\n \"\\n\"\n \" The service may simply not have enough copies or it could simply be\\n\"\n \" unhealthy in that location. There also may not be enough resources\\n\"\n \" in the cluster to support the requested instance count.\\n\"\n \"\\n\"\n \"Things you can do:\\n\"\n \"\\n\"\n \" * You can view the logs for the job with:\\n\"\n \" paasta logs -s %(service)s -i %(instance)s -c %(cluster)s\\n\"\n \"\\n\"\n \" * Fix the cause of the unhealthy service. 
Try running:\\n\"\n \"\\n\"\n \" paasta status -s %(service)s -i %(instance)s -c %(cluster)s -vv\\n\"\n \"\\n\"\n \" * Widen SmartStack discovery settings\\n\"\n \" * Increase the instance count\\n\"\n \"\\n\"\n ) % {\n 'service': instance_config.service,\n 'instance': instance_config.instance,\n 'cluster': instance_config.cluster,\n }\n log.error(output)\n else:\n status = pysensu_yelp.Status.OK\n log.info(output)\n send_event(instance_config=instance_config, status=status, output=output)", "def run(self):\n client = self._get_client()\n metadata = self._metadata\n\n if str(metadata.get('cluster')) == str(self._cluster_id):\n if str(self._uuid) == str(self._node_id):\n #hypervisor_hostname = client.servers.find(\n # id=self._uuid).to_dict()['OS-EXT-SRV-ATTR:hypervisor_hostname']\n hypervisor_hostname = client.servers.find(\n id=self._uuid).to_dict()['OS-EXT-SRV-ATTR:host']\n data = {\n 'migrate': True,\n 'uuid': self._uuid,\n 'hypervisor_hostname': hypervisor_hostname,\n 'flavor_id': self._flavor\n }\n return Result(data)\n\n data = {\n 'migrate': False,\n 'uuid': self._uuid,\n 'hypervisor_hostname': '',\n 'flavor_id':self._flavor\n }\n return Result(data)", "def _same_instance(client1, client2):\n return client1._topology_settings.seeds == client2._topology_settings.seeds", "def replica(self) -> str:\n return pulumi.get(self, \"replica\")", "def test_delete_collection_cluster_policy(self):\n pass", "def use_read_replica_if_available(queryset):\r\n return queryset.using(\"read_replica\") if \"read_replica\" in settings.DATABASES else queryset", "def upsert(self):\n\n if self.cluster:\n self.cluster.upsert()\n else:\n super().upsert()", "def test_update_nas_share_by_pool(self):\n pass", "def cluster_replicate(\n self, target_nodes: \"TargetNodesT\", node_id: str\n ) -> ResponseT:\n return self.execute_command(\n \"CLUSTER REPLICATE\", node_id, target_nodes=target_nodes\n )", "def test_snat_with_nodes_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n for node in self.inputs.k8s_slave_ips:\n self.inputs.reboot(node)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def test_patch_cluster_role_binding(self):\n pass", "def replication(self):\n return self._replication", "def test_patch_cluster_network(self):\n pass", "def crash_safe_replication_enabled(self) -> bool:\n return pulumi.get(self, \"crash_safe_replication_enabled\")", "def test_rollback(self):\n os.system('rm config.txt; touch config.txt')\n test_oplog, primary_conn, mongos, solr = self.get_new_oplog()\n\n if not start_cluster():\n self.fail('Cluster could not be started successfully!')\n\n solr = DocManager()\n test_oplog.doc_manager = solr\n solr._delete() # equivalent to solr.delete(q='*: *')\n\n mongos['test']['test'].remove({})\n mongos['test']['test'].insert( \n {'_id': ObjectId('4ff74db3f646462b38000001'),\n 'name': 'paulie'},\n safe=True\n )\n while (mongos['test']['test'].find().count() != 1):\n time.sleep(1)\n cutoff_ts = test_oplog.get_last_oplog_timestamp()\n\n first_doc = {'name': 'paulie', '_ts': bson_ts_to_long(cutoff_ts),\n 'ns': 'test.test',\n '_id': ObjectId('4ff74db3f646462b38000001')}\n\n #try kill one, try restarting\n kill_mongo_proc(primary_conn.host, PORTS_ONE['PRIMARY'])\n\n new_primary_conn 
= Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))\n admin = new_primary_conn['admin']\n while admin.command(\"isMaster\")['ismaster'] is False:\n time.sleep(1)\n time.sleep(5)\n count = 0\n while True:\n try:\n mongos['test']['test'].insert({\n '_id': ObjectId('4ff74db3f646462b38000002'),\n 'name': 'paul'}, \n safe=True)\n break\n except OperationFailure:\n count += 1\n if count > 60:\n self.fail('Call to insert doc failed too many times')\n time.sleep(1)\n continue\n while (mongos['test']['test'].find().count() != 2):\n time.sleep(1)\n kill_mongo_proc(primary_conn.host, PORTS_ONE['SECONDARY'])\n start_mongo_proc(PORTS_ONE['PRIMARY'], \"demo-repl\", \"/replset1a\",\n \"/replset1a.log\", None)\n\n #wait for master to be established\n while primary_conn['admin'].command(\"isMaster\")['ismaster'] is False:\n time.sleep(1)\n\n start_mongo_proc(PORTS_ONE['SECONDARY'], \"demo-repl\", \"/replset1b\",\n \"/replset1b.log\", None)\n\n #wait for secondary to be established\n admin = new_primary_conn['admin']\n while admin.command(\"replSetGetStatus\")['myState'] != 2:\n time.sleep(1)\n while retry_until_ok(mongos['test']['test'].find().count) != 1:\n time.sleep(1)\n\n self.assertEqual(str(new_primary_conn.port), PORTS_ONE['SECONDARY'])\n self.assertEqual(str(primary_conn.port), PORTS_ONE['PRIMARY'])\n\n last_ts = test_oplog.get_last_oplog_timestamp()\n second_doc = {'name': 'paul', '_ts': bson_ts_to_long(last_ts),\n 'ns': 'test.test', \n '_id': ObjectId('4ff74db3f646462b38000002')}\n\n test_oplog.doc_manager.upsert(first_doc)\n test_oplog.doc_manager.upsert(second_doc)\n\n test_oplog.rollback()\n test_oplog.doc_manager.commit()\n results = solr._search()\n\n assert(len(results) == 1)\n\n self.assertEqual(results[0]['name'], 'paulie')\n self.assertTrue(results[0]['_ts'] <= bson_ts_to_long(cutoff_ts))\n\n #test_oplog.join()", "def replicas(self, replicas):\n\n self._replicas = replicas", "def disable_replicate(self, req, id, body):\n LOG.info(_LI(\"Disable volume's replicate, volume_id: %s\"), id)\n context = req.environ['sgservice.context']\n volume = self.service_api.get(context, id)\n replicate_info = self.service_api.disable_replicate(context, volume)\n return self._view_builder.action_summary(req, replicate_info)", "def start_wsrep_new_cluster(self):\r\n return execute(self._start_wsrep_new_cluster)", "def check_replica_primary(con,host, warning, critical,perf_data):\n if warning is None and critical is None:\n warning=1\n warning=warning or 2\n critical=critical or 2\n\n primary_status=0\n message=\"Primary server has not changed\"\n db=con[\"nagios\"]\n data=get_server_status(con)\n current_primary=data['repl'].get('primary')\n saved_primary=get_stored_primary_server_name(db)\n if current_primary is None:\n current_primary = \"None\"\n if saved_primary is None:\n saved_primary = \"None\"\n if current_primary != saved_primary:\n last_primary_server_record = {\"server\": current_primary}\n db.last_primary_server.update({\"_id\": \"last_primary\"}, {\"$set\" : last_primary_server_record} , upsert=True, safe=True)\n message = \"Primary server has changed from %s to %s\" % (saved_primary, current_primary)\n primary_status=1\n return check_levels(primary_status,warning,critical,message)", "def get_instance_OLD (self):\n instances = self.data['instances']\n if not len(instances) == 1:\n raise Exception, \"ArchivalObject: %d Instances found\" % len(instances)\n return instances[0]", "def test_assign_configuration_to_valid_instance(self):\n print(\"instance_info.id: %s\" % instance_info.id)\n 
print(\"configuration_info: %s\" % configuration_info)\n print(\"configuration_info.id: %s\" % configuration_info.id)\n config_id = configuration_info.id\n instance_info.dbaas.instances.modify(instance_info.id,\n configuration=config_id)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)", "def test_snat_with_docker_restart_on_master(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"docker\",\n host_ips = [self.inputs.k8s_master_ip])\n time.sleep(30)\n cluster_status, error_nodes = ContrailStatusChecker(self.inputs).wait_till_contrail_cluster_stable()\n assert cluster_status, 'All nodes and services not up. Failure nodes are: %s' % (\n error_nodes)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def test_replace_cluster_network(self):\n pass", "def failover_replica(self) -> pulumi.Output['outputs.InstanceFailoverReplicaResponse']:\n return pulumi.get(self, \"failover_replica\")", "def test_create_cluster_policy(self):\n pass", "def install_mongo ( vpc_conn, ec2_conn, cloudwatch_conn, vpc, base_name, aws_account_type, base_topicarn, params ):\n app_type = 'MONGO'\n mongo_keypair = get_keypair_name( aws_account_type, vpc.region.name, app_type )\n print mongo_keypair\n mongo_sec_grp_name = get_secgrp_name( base_name, app_type )\n \n print \"mongo_sec_grp_name\" + mongo_sec_grp_name\n \n mongo_sec_grp = find_secgrp(ec2_conn, mongo_sec_grp_name)\n print mongo_sec_grp\n \n if not mongo_sec_grp :\n mongo_sec_grp = create_secgrp( ec2_conn,\n vpc,\n mongo_sec_grp_name,\n 'Controls access to the ' + app_type )\n print mongo_sec_grp \n \n remove_all_rules( ec2_conn, [ mongo_sec_grp ],deep = True,base_name = base_name)\n grant_ssh_access( ec2_conn, [ mongo_sec_grp ], find_group( ec2_conn, base_name, 'NAT' ) )\n grant_grp_self_access ( ec2_conn, mongo_sec_grp, 0, 65535, protocol = 'tcp' )\n \n mongo_ami_name = base_name + '-' + app_type\n \n mongo_ami = get_ami_by_name( ec2_conn, mongo_ami_name )\n if not mongo_ami :\n mongo_ami = get_ami_by_name( ec2_conn, default_mongo_ami )\n\n print \"Creating mongoDB Instances\"\n \n mongo_subnets = get_vpc_subnets( vpc_conn, vpc, 'STATIC' )\n mongo_sec_grps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ mongo_sec_grp_name ] } )\n \n primary_ip = get_primary_node(mongo_subnets)\n mongo_instances = []\n mongo_config = get_mongo_config(mongo_subnets)\n \n user_data = None\n primary_instance_id = None\n primary_subnet = None\n for subnet in mongo_subnets :\n cidr_block = subnet.cidr_block\n \n if cidr_block == primary_ip :\n primary_subnet = subnet\n \n # First launch the secondary instances\n if cidr_block != primary_ip :\n instance_private_ip = get_static_ip(subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo )\n\n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( 
cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )\n\n user_data = base64.b64encode(mongo_config)\n \n print \"Launching primary mongo instance\"\n print \"primary_subnet \" +primary_subnet.cidr_block\n instance_private_ip = get_static_ip(primary_subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = primary_subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo)\n \n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )", "def test_patch_hyperflex_cluster_storage_policy(self):\n pass", "def test_non_additive_instance_creation(self):\n\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import launch_instances, run_block_device_dict, farm_security_group_setup\n\n # launch_instances requires vpc setup as done by firesim/scripts/setup_firesim.py\n from awstools.aws_setup import aws_setup\n aws_setup()\n farm_security_group_setup()\n\n type = 'f1.2xlarge'\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags = {'fsimcluster': 'testcluster'},\n always_expand=False)\n instances.should.have.length_of(1)\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags = {'fsimcluster': 'testcluster'},\n always_expand=False)\n instances.should.have.length_of(1)\n\n # There should be one instance total now, across one reservation\n ec2_client = boto3.client('ec2')\n paginator = ec2_client.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n all_reservations.should.have.length_of(1)\n\n [i for r in all_reservations for i in r['Instances']].should.have.length_of(1)", "def set_cluster_autoscaler(enabled, worker_pool_names=None, new_worker_pool_names=None):\n modified_pools = []\n if k8s.exists('configmap', 'kube-system', 'iks-ca-configmap'):\n config_map = k8s.get('configmap', 'kube-system', 'iks-ca-configmap')\n worker_pools_config = json.loads(config_map['data']['workerPoolsConfig.json'])\n rename_worker_pools = new_worker_pool_names and worker_pool_names and len(new_worker_pool_names) == len(worker_pool_names)\n for pool_config in worker_pools_config:\n if not worker_pool_names or pool_config['name'] in worker_pool_names:\n if rename_worker_pools:\n pool_config['name'] = new_worker_pool_names[worker_pool_names.index(pool_config['name'])]\n pool_config['enabled'] = enabled\n modified_pools.append(pool_config['name'])\n elif pool_config['enabled'] != enabled:\n pool_config['enabled'] = enabled\n modified_pools.append(pool_config['name'])\n if modified_pools:\n config_map['data']['workerPoolsConfig.json'] = json.dumps(worker_pools_config, ensure_ascii=False) # TODO: Remove ensure_ascii when migration to py3 is complete\n k8s.apply(config_map)\n 
else:\n logger.info('Cluster autoscaler is not present')\n return modified_pools", "def replace_with_insufficient_replicas_test(self):\n debug(\"Starting cluster with 3 nodes.\")\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n if DISABLE_VNODES:\n num_tokens = 1\n else:\n # a little hacky but grep_log returns the whole line...\n num_tokens = int(node3.get_conf_option('num_tokens'))\n\n debug(\"testing with num_tokens: {}\".format(num_tokens))\n\n debug(\"Inserting Data...\")\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=2)'])\n\n # stop node to replace\n debug(\"Stopping node to replace.\")\n node3.stop(wait_other_notice=True)\n\n # stop other replica\n debug(\"Stopping node2 (other replica)\")\n node2.stop(wait_other_notice=True)\n\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n cluster.add(node4, False)\n node4.start(replace_address='127.0.0.3', wait_for_binary_proto=False, wait_other_notice=False)\n\n # replace should fail due to insufficient replicas\n node4.watch_log_for(\"Unable to find sufficient sources for streaming range\")\n assert_not_running(node4)", "def test_delete_cluster_role(self):\n pass", "def test_snat_with_kubelet_restart_on_master(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"kubelet\",\n host_ips = [self.inputs.k8s_master_ip])\n time.sleep(30) # Wait timer for all kubernetes pods to stablise.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def index(self):\n\n if self.cluster:\n self.cluster.index()\n else:\n super().index()", "def convert_container_to_replica(\n self,\n replica_name: str,\n active_container: docker.models.containers.Container,\n passive_container: docker.models.containers.Container) -> list[docker.models.images.Image]:\n new_replica_name = self.sanitize_replica_name(replica_name)\n replica_list = []\n container_list = [\n active_container, passive_container] if passive_container else [active_container]\n\n logger.info(\n f'Creating new replica image with name {new_replica_name}...')\n\n for container in container_list:\n try:\n self.client.images.remove(new_replica_name, force=True)\n except docker.errors.ImageNotFound:\n pass\n\n container_arch = container.name.split('_')[-1]\n\n # commit with arch tag\n replica = container.commit(\n repository=new_replica_name, tag=container_arch)\n replica_list.append(replica)\n\n logger.info(\n f'Replica image {replica.tags[0]} created. 
Cleaning up...')\n self.remove_container(container.name)\n\n for replica in replica_list:\n if replica.attrs.get('Architecture') == LOCAL_ARCHITECTURE:\n local_arch_replica = replica\n local_arch_replica.tag(\n repository=new_replica_name, tag='latest')\n\n # this is done due to how recomitting existing image is not reflected in 'replica_list' var\n actual_replica_list = self.client.images.list(new_replica_name)\n\n return actual_replica_list", "def _mix_replicas(self):\n logger.debug(\"Mixing replicas (does nothing for MultiStateSampler)...\")\n\n # Reset storage to keep track of swap attempts this iteration.\n self._n_accepted_matrix[:, :] = 0\n self._n_proposed_matrix[:, :] = 0\n\n # Determine fraction of swaps accepted this iteration.\n n_swaps_proposed = self._n_proposed_matrix.sum()\n n_swaps_accepted = self._n_accepted_matrix.sum()\n swap_fraction_accepted = 0.0\n if n_swaps_proposed > 0:\n swap_fraction_accepted = n_swaps_accepted / n_swaps_proposed # Python 3 uses true division for /\n logger.debug(\"Accepted {}/{} attempted swaps ({:.1f}%)\".format(n_swaps_accepted, n_swaps_proposed,\n swap_fraction_accepted * 100.0))\n return self._replica_thermodynamic_states", "def test_list_cluster_policy(self):\n pass", "def run(self, image, replicas, scale_replicas, command=None,\n status_wait=True):\n namespace = self.choose_namespace()\n\n name = self.client.create_statefulset(\n namespace=namespace,\n replicas=replicas,\n image=image,\n command=command,\n status_wait=status_wait\n )\n\n self.client.scale_statefulset(\n name,\n namespace=namespace,\n replicas=scale_replicas,\n status_wait=status_wait\n )\n\n self.client.scale_statefulset(\n name,\n namespace=namespace,\n replicas=replicas,\n status_wait=status_wait\n )\n\n self.client.delete_statefulset(\n name=name,\n namespace=namespace,\n status_wait=status_wait\n )", "def modify_rds_security_group(payload):\n # version = payload.get(\"version\")\n rds_ids = payload.pop(\"resource_id\")\n sg_ids = payload.pop(\"sg_id\")\n apply_action = \"GrantSecurityGroup\"\n remove_action = \"RemoveSecurityGroup\"\n check_instance_security_action = \"DescribeSecurityGroupByInstance\"\n version = payload.get(\"version\")\n result_data = {}\n\n succ, resp = get_ha_rds_backend_instance_info(payload)\n if not succ:\n return resp\n rds_2_instance = {rds: instance for instance, rds in resp.items()}\n\n if len(sg_ids) > 1:\n return console_response(\n SecurityErrorCode.ONE_SECURITY_PER_INSTANCE_ERROR, \"modify failed\")\n sg_id = sg_ids[0]\n\n code = 0\n msg = 'Success'\n for rds_id in rds_ids:\n sg_results_succ = []\n sg = RdsSecurityGroupModel.get_security_by_id(sg_id=sg_id)\n sg_uuid = sg.uuid\n visible_rds_record = RdsModel.get_rds_by_id(rds_id=rds_id)\n if visible_rds_record.rds_type == 'ha':\n rds_group = visible_rds_record.rds_group\n rds_records = RdsModel.get_rds_records_by_group(rds_group)\n else:\n rds_records = []\n for rds_record in rds_records:\n rds_ins_uuid = rds_2_instance.get(rds_record.uuid)\n\n payload.update(\n {\"action\": check_instance_security_action, \"version\": version,\n \"server\": rds_ins_uuid})\n # check_resp = api.get(payload=payload, timeout=10)\n check_resp = api.get(payload=payload)\n if check_resp.get(\"code\") != 0:\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = check_resp.get(\"msg\")\n continue\n\n # if the instance already has a security group, remove it\n if check_resp[\"data\"][\"total_count\"] > 0:\n old_sg_uuid = check_resp[\"data\"][\"ret_set\"][0][\"id\"]\n payload.update({\"action\": remove_action, 
\"version\": version,\n \"server\": rds_ins_uuid,\n \"security_group\": old_sg_uuid})\n # remove_resp = api.get(payload=payload, timeout=10)\n remove_resp = api.get(payload=payload)\n if remove_resp.get(\"code\") != 0:\n logger.debug(\"the resp of removing the old securty group is:\" +\n str(remove_resp))\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = remove_resp.get(\"msg\")\n continue\n else:\n rds_record.sg = None\n rds_record.save()\n\n # grant the new security group to the instance\n payload.update({\"action\": apply_action, \"version\": version,\n \"server\": rds_ins_uuid, \"security_group\": sg_uuid})\n # grant_resp = api.get(payload=payload, timeout=10)\n grant_resp = api.get(payload=payload)\n\n if grant_resp.get(\"code\") != 0:\n logger.debug(\"the resp of granting the new securty group is:\" +\n str(grant_resp))\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = grant_resp.get(\"msg\")\n logger.error(\n \"security_group with sg_id \" + sg_id +\n \" cannot apply to rds with rds_id \" + rds_id)\n else:\n try:\n rds_record.sg = RdsSecurityGroupModel.\\\n get_security_by_id(sg_id)\n rds_record.save()\n except Exception as exp:\n logger.error(\"cannot save grant sg to rds to db, {}\".\n format(exp.message))\n else:\n sg_results_succ.append(sg_id)\n result_data.update({rds_id: sg_results_succ})\n resp = console_response(code, msg, len(result_data.keys()), [result_data])\n return resp", "def test_patch_hyperflex_cluster_profile(self):\n pass", "def cluster_run(self, cmd):\n instances = self.service.get_instances()\n responses = []\n for instance in instances:\n success, output = self.run_remote_script(cmd, instance=instance)\n responses.append((success, output))\n return responses", "def get_replica_ips(self):\n return self.membership", "def _standby_clone():\n # manualy:\n # $ mkdir -p /var/lib/postgresql/9.1/testscluster/\n # $ rsync -avz --rsh='ssh -p2222' root@12.34.56.789:/var/lib/postgresql/9.1/testscluster/ /var/lib/postgresql/9.1/testscluster/\n\n with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):\n puts(green('Start cloning the master'))\n repmgr_clone_command = 'repmgr -D %(slave_pgdata_path)s -d %(sync_db)s -p %(cluster_port)s -U %(sync_user)s -R postgres --verbose standby clone %(pgmaster_ip)s' % env\n puts(green(repmgr_clone_command))\n puts(\"-\" * 40)\n res = sudo(repmgr_clone_command, user='postgres')\n if 'Can not connect to the remote host' in res or 'Connection to database failed' in res:\n puts(\"-\" * 40)\n puts(green(repmgr_clone_command))\n puts(\"-\" * 40)\n puts(\"Master server is %s reachable.\" % red(\"NOT\"))\n puts(\"%s you can try to CLONE the slave manually [%s]:\" % (green(\"BUT\"), red(\"at your own risk\")))\n puts(\"On the slave server:\")\n puts(\"$ sudo -u postgres rsync -avz --rsh='ssh -p%(master_ssh_port)s' postgres@%(pgmaster_ip)s:%(master_pgdata_path)s %(slave_pgdata_path)s --exclude=pg_xlog* --exclude=pg_control --exclude=*.pid\" % env)\n puts(\"Here:\")\n puts(\"$ fab <cluster_task_name> finish_configuring_slave\")\n abort(\"STOP...\")", "def share_replica_delete(context, share_replica_id, session=None,\n need_to_update_usages=True):\n session = session or get_session()\n\n share_instance_delete(context, share_replica_id, session=session,\n need_to_update_usages=need_to_update_usages)", "def test_replace_cluster_role_binding(self):\n pass", "def test_list_cluster_role(self):\n pass", "def copy(self):\n new_client = self._client.copy()\n return self.__class__(self.instance_id, new_client,\n 
self._cluster_location_id,\n display_name=self.display_name)", "def resource_type(self):\n return 'cluster'", "def test_patch_hyperflex_cluster_network_policy(self):\n pass", "def test_unassign_configuration_from_instances(self):\n instance_info.dbaas.instances.update(configuration_instance.id,\n remove_configuration=True)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)\n\n instance_info.dbaas.instances.update(instance_info.id,\n remove_configuration=True)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)\n instance_info.dbaas.instances.get(instance_info.id)\n\n def result_has_no_configuration():\n instance = instance_info.dbaas.instances.get(inst_info.id)\n if hasattr(instance, 'configuration'):\n return False\n else:\n return True\n\n inst_info = instance_info\n poll_until(result_has_no_configuration)\n inst_info = configuration_instance\n poll_until(result_has_no_configuration)\n\n instance = instance_info.dbaas.instances.get(instance_info.id)\n assert_equal('RESTART_REQUIRED', instance.status)", "def test_snat_pod_restart(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2, client3, client4)\n assert self.restart_pod(client1[0])\n assert self.restart_pod(client2[0])\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def initialize_replicas(self):\n try:\n self.replicas+1\n except:\n print \"Ensemble MD Toolkit Error: Number of replicas must be \\\n defined for pattern ReplicaExchange!\"\n raise\n\n\n replicas = []\n N = self.replicas\n for k in range(N):\n r = ReplicaP(k)\n replicas.append(r)\n\n return replicas", "def preflight(self, connection):\n return True", "def test_no_sync_correctness(self):\n self.run_subtests(\n {\n \"sharding_strategy\": [\n ShardingStrategy.FULL_SHARD,\n ShardingStrategy.SHARD_GRAD_OP,\n ShardingStrategy.NO_SHARD,\n ],\n },\n self._test_no_sync_correctness,\n )", "def run_instance():\n ami_id = \"ami-04876f29fd3a5e8ba\" # AMI Id\n instance_type = \"t2.micro\" # Instance Type\n tag_specs = [\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': 'Name',\n 'Value': 'BoB10@ProductDev.sdk.8084'\n }\n ]\n }\n ]\n\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Run an instance\n\n instances = ec2_resource.create_instances(ImageId=ami_id, InstanceType=instance_type,\n MaxCount=1, MinCount=1, KeyName='BoB10@ProductDev.8084',\n TagSpecifications=tag_specs, \n SecurityGroupIds=['sg-06b757b4bb272d98f'],\n UserData=assemble_userdata().as_string())\n Instance_id = instances[0].id\n print('\\nInstance Id: ' + Instance_id)\n print('Image Id: ' + instances[0].image_id)\n print('Instance Type: ' + instances[0].instance_type)\n print('State: ' + instances[0].state['Name'])\n return Instance_id", "def cluster_reboot(cluster):\n map(reboot, cluster)", "def test_update_instance_limit1(self):\n pass", "def test_crud_autoscaler(self):\n # create the parent cluster\n cluster_id = self._create_cluster()\n\n # create cluster autoscaler\n response = self._create_autoscaler(cluster_id)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)\n\n # list existing objects\n autoscaler_id = self._list_autoscalers(cluster_id)\n\n # update autoscaler\n response = 
self._update_autoscaler(cluster_id, autoscaler_id)\n self.assertDictContainsSubset(self.AUTOSCALER_UPDATE_DATA, response)\n\n # check it exists\n autoscaler_id = self._check_autoscaler_exists(cluster_id, autoscaler_id)\n\n # delete the object\n response = self._delete_autoscaler(cluster_id, autoscaler_id)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n # check it no longer exists\n self._check_no_autoscalers_exist(cluster_id)", "def failover_host(self, context, volumes, secondary_id=None, groups=None):\n volume_updates = []\n back_end_ip = None\n svc_host = volume_utils.extract_host(self.host, 'backend')\n service = objects.Service.get_by_args(context, svc_host,\n 'cinder-volume')\n\n if secondary_id and secondary_id != self.replica.backend_id:\n LOG.error(\"Kaminario driver received failover_host \"\n \"request, But backend is non replicated device\")\n raise exception.UnableToFailOver(reason=_(\"Failover requested \"\n \"on non replicated \"\n \"backend.\"))\n\n if (service.active_backend_id and\n service.active_backend_id != self.configuration.san_ip):\n self.snap_updates = []\n rep_volumes = []\n # update status for non-replicated primary volumes\n for v in volumes:\n vol_name = self.get_volume_name(v['id'])\n vol = self.client.search(\"volumes\", name=vol_name)\n if v.replication_status != K2_REP_FAILED_OVER and vol.total:\n status = 'available'\n if v.volume_attachment:\n map_rs = self.client.search(\"mappings\",\n volume=vol.hits[0])\n status = 'in-use'\n if map_rs.total:\n map_rs.hits[0].delete()\n volume_updates.append({'volume_id': v['id'],\n 'updates':\n {'status': status}})\n else:\n rep_volumes.append(v)\n\n # In-sync from secondaray array to primary array\n for v in rep_volumes:\n vol_name = self.get_volume_name(v['id'])\n vol = self.client.search(\"volumes\", name=vol_name)\n rvol_name = self.get_rep_name(vol_name)\n rvol = self.target.search(\"volumes\", name=rvol_name)\n session_name = self.get_session_name(v['id'])\n rsession_name = self.get_rep_name(session_name)\n ssn = self.target.search(\"replication/sessions\",\n name=rsession_name)\n if ssn.total:\n tgt_ssn = ssn.hits[0]\n ssn = self.client.search(\"replication/sessions\",\n name=session_name)\n if ssn.total:\n src_ssn = ssn.hits[0]\n\n if (tgt_ssn.state == 'failed_over' and\n tgt_ssn.current_role == 'target' and vol.total and src_ssn):\n map_rs = self.client.search(\"mappings\", volume=vol.hits[0])\n if map_rs.total:\n map_rs.hits[0].delete()\n tgt_ssn.state = 'in_sync'\n tgt_ssn.save()\n self._check_for_status(src_ssn, 'in_sync')\n if (rvol.total and src_ssn.state == 'in_sync' and\n src_ssn.current_role == 'target'):\n gen_no = self._create_volume_replica_user_snap(self.target,\n tgt_ssn)\n self.snap_updates.append({'tgt_ssn': tgt_ssn,\n 'gno': gen_no,\n 'stime': time.time()})\n LOG.debug(\"The target session: %s state is \"\n \"changed to in sync\", rsession_name)\n\n self._is_user_snap_sync_finished()\n\n # Delete secondary volume mappings and create snapshot\n for v in rep_volumes:\n vol_name = self.get_volume_name(v['id'])\n vol = self.client.search(\"volumes\", name=vol_name)\n rvol_name = self.get_rep_name(vol_name)\n rvol = self.target.search(\"volumes\", name=rvol_name)\n session_name = self.get_session_name(v['id'])\n rsession_name = self.get_rep_name(session_name)\n ssn = self.target.search(\"replication/sessions\",\n name=rsession_name)\n if ssn.total:\n tgt_ssn = ssn.hits[0]\n ssn = self.client.search(\"replication/sessions\",\n name=session_name)\n if ssn.total:\n src_ssn = 
ssn.hits[0]\n if (rvol.total and src_ssn.state == 'in_sync' and\n src_ssn.current_role == 'target'):\n map_rs = self.target.search(\"mappings\",\n volume=rvol.hits[0])\n if map_rs.total:\n map_rs.hits[0].delete()\n gen_no = self._create_volume_replica_user_snap(self.target,\n tgt_ssn)\n self.snap_updates.append({'tgt_ssn': tgt_ssn,\n 'gno': gen_no,\n 'stime': time.time()})\n self._is_user_snap_sync_finished()\n # changing source sessions to failed-over\n for v in rep_volumes:\n vol_name = self.get_volume_name(v['id'])\n vol = self.client.search(\"volumes\", name=vol_name)\n rvol_name = self.get_rep_name(vol_name)\n rvol = self.target.search(\"volumes\", name=rvol_name)\n session_name = self.get_session_name(v['id'])\n rsession_name = self.get_rep_name(session_name)\n ssn = self.target.search(\"replication/sessions\",\n name=rsession_name)\n if ssn.total:\n tgt_ssn = ssn.hits[0]\n ssn = self.client.search(\"replication/sessions\",\n name=session_name)\n if ssn.total:\n src_ssn = ssn.hits[0]\n if (rvol.total and src_ssn.state == 'in_sync' and\n src_ssn.current_role == 'target'):\n src_ssn.state = 'failed_over'\n src_ssn.save()\n self._check_for_status(tgt_ssn, 'suspended')\n LOG.debug(\"The target session: %s state is \"\n \"changed to failed over\", session_name)\n\n src_ssn.state = 'in_sync'\n src_ssn.save()\n LOG.debug(\"The target session: %s state is \"\n \"changed to in sync\", session_name)\n rep_status = fields.ReplicationStatus.DISABLED\n volume_updates.append({'volume_id': v['id'],\n 'updates':\n {'replication_status': rep_status}})\n\n back_end_ip = self.configuration.san_ip\n else:\n \"\"\"Failover to replication target.\"\"\"\n for v in volumes:\n vol_name = self.get_volume_name(v['id'])\n rv = self.get_rep_name(vol_name)\n if self.target.search(\"volumes\", name=rv).total:\n self._failover_volume(v)\n volume_updates.append(\n {'volume_id': v['id'],\n 'updates':\n {'replication_status': K2_REP_FAILED_OVER}})\n else:\n volume_updates.append({'volume_id': v['id'],\n 'updates': {'status': 'error', }})\n back_end_ip = self.replica.backend_id\n return back_end_ip, volume_updates, []", "def replica_configuration(self) -> 'outputs.ReplicaConfigurationResponse':\n return pulumi.get(self, \"replica_configuration\")", "def failover_replicate(self, req, id, body):\n LOG.info(_LI(\"Failover volume's replicate, volume_id: %s\"), id)\n context = req.environ['sgservice.context']\n params = body.get('failover_replicate', {})\n\n checkpoint_id = params.get('checkpoint_id', None)\n force = params.get('force', False)\n volume = self.service_api.get(context, id)\n replicate_info = self.service_api.failover_replicate(\n context, volume, checkpoint_id, force)\n return self._view_builder.action_summary(req, replicate_info)", "def enable_instance_modification(self):\n self._request({\"enable-instance-modification\": True})", "def test_create_cluster_resource_quota(self):\n pass", "def test_delete_cluster_policy(self):\n pass", "def test_01_dedicated_cluster_allocation(self):\n\n # Step 1\n dedicateCmd = dedicateCluster.dedicateClusterCmd()\n dedicateCmd.clusterid = self.clusters[0].id\n dedicateCmd.domainid = self.domain.id\n dedicateCmd.account = self.account_1.name\n self.apiclient.dedicateCluster(dedicateCmd)\n\n afcmd = listAffinityGroups.listAffinityGroupsCmd()\n afcmd.account = self.account_1.name\n afcmd.domainid = self.account_1.domainid\n affinitygr_list = self.apiclient.listAffinityGroups(afcmd)\n\n # Step 2\n self.vm = VirtualMachine.create(\n self.userapiclient_1,\n 
self.testdata[\"small\"],\n templateid=self.template.id,\n accountid=self.account_1.name,\n domainid=self.account_1.domainid,\n serviceofferingid=self.service_offering.id,\n affinitygroupids=[affinitygr_list[0].id],\n zoneid=self.zone.id,\n mode=self.zone.networktype\n )\n # Steps to verify if VM is created on dedicated account\n vmlist = list_virtual_machines(self.apiclient,\n id=self.vm.id)\n\n hostlist = list_hosts(self.apiclient,\n id=vmlist[0].hostid)\n\n self.assertEqual(hostlist[0].clusterid,\n self.clusters[0].id,\n \"check if vm gets deployed on dedicated clusture\"\n )\n # Step 3\n self.vm_1 = VirtualMachine.create(\n self.userapiclient_2,\n self.testdata[\"small\"],\n templateid=self.template.id,\n accountid=self.account_2.name,\n domainid=self.account_2.domainid,\n serviceofferingid=self.service_offering.id,\n zoneid=self.zone.id,\n mode=self.zone.networktype\n )\n\n # Steps to verify if VM is created on dedicated account\n vmlist_1 = list_virtual_machines(self.apiclient,\n id=self.vm_1.id)\n\n hostlist_1 = list_hosts(self.apiclient,\n id=vmlist_1[0].hostid)\n\n self.assertNotEqual(hostlist_1[0].clusterid,\n self.clusters[0].id,\n \"check if vm gets deployed on correct clusture\"\n )\n\n # Step 4\n routerList = list_routers(self.apiclient,\n clusterid=self.clusters[0].id,\n networkid=self.vm_1.nic[0].networkid\n )\n self.assertEqual(\n routerList,\n None,\n \"Check Dedicated cluster is used for virtual routers \\\n that belong to non-dedicated account\")\n\n return", "def test_snat_with_kubelet_restart_on_slave(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"kubelet\",\n host_ips = self.inputs.k8s_slave_ips)\n time.sleep(30) # Wait timer for all kubernetes pods to stablise.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)" ]
[ "0.623168", "0.603755", "0.5694353", "0.56472486", "0.56435645", "0.5474983", "0.5451861", "0.5392919", "0.53544354", "0.53199965", "0.5290761", "0.52693903", "0.5234935", "0.5150445", "0.5137627", "0.50857395", "0.50686383", "0.5060258", "0.50519294", "0.5039493", "0.5033433", "0.5032968", "0.5003775", "0.5002626", "0.49974394", "0.49967846", "0.49861974", "0.49642357", "0.4951251", "0.49439925", "0.4940513", "0.4902529", "0.4893421", "0.4887058", "0.488679", "0.48859382", "0.48734456", "0.48729318", "0.48424408", "0.48407492", "0.48385313", "0.48365286", "0.48344707", "0.4823285", "0.48188427", "0.48188224", "0.48125416", "0.48093906", "0.4803567", "0.47993007", "0.47967416", "0.47882706", "0.47866255", "0.47667053", "0.4763093", "0.47622836", "0.47316208", "0.4726898", "0.4720037", "0.470443", "0.46977246", "0.4694325", "0.4694063", "0.46931762", "0.4692432", "0.46915314", "0.4683299", "0.46814567", "0.46784243", "0.46732897", "0.4673155", "0.46712235", "0.46673593", "0.46656334", "0.46645927", "0.46594805", "0.46534425", "0.46499673", "0.46490094", "0.46477193", "0.46391717", "0.46344528", "0.4631677", "0.46303886", "0.46285662", "0.4627357", "0.4624468", "0.46227995", "0.46177706", "0.46165967", "0.4606199", "0.46049762", "0.46031752", "0.46027347", "0.45930704", "0.4591272", "0.45837167", "0.45811903", "0.45805147", "0.4579441", "0.45731068" ]
0.0
-1
> For more information, see [View the zone of a node](~~123825~~). This operation is applicable only to replica set and sharded cluster instances, but not to standalone instances.
def describe_role_zone_info_with_options( self, request: dds_20151201_models.DescribeRoleZoneInfoRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.DescribeRoleZoneInfoResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='DescribeRoleZoneInfo', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.DescribeRoleZoneInfoResponse(), self.call_api(params, req, runtime) )
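A minimal usage sketch for the synchronous DescribeRoleZoneInfo call above, assuming the generated client from the alibabacloud_dds20151201 package together with alibabacloud_tea_openapi and alibabacloud_tea_util; the endpoint, the credential environment variable names, the wrapper function name, and the instance ID are placeholders, not values taken from the source.

import os

from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_dds20151201.client import Client as DdsClient
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util import models as util_models


def query_role_zone_info(instance_id: str) -> dds_20151201_models.DescribeRoleZoneInfoResponse:
    # Credentials are read from the environment; the endpoint is an assumed placeholder.
    config = open_api_models.Config(
        access_key_id=os.environ['ALIBABA_CLOUD_ACCESS_KEY_ID'],
        access_key_secret=os.environ['ALIBABA_CLOUD_ACCESS_KEY_SECRET'],
        endpoint='mongodb.aliyuncs.com',
    )
    client = DdsClient(config)
    # DescribeRoleZoneInfo applies to replica set and sharded cluster instances only,
    # not to standalone instances.
    request = dds_20151201_models.DescribeRoleZoneInfoRequest(dbinstance_id=instance_id)
    return client.describe_role_zone_info_with_options(request, util_models.RuntimeOptions())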
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_zone():\n r = get('http://metadata.google.internal/'\n 'computeMetadata/v1/instance/zone',\n headers={'Metadata-Flavor': 'Google'})\n if r.status_code == 200:\n return sub(r'.+zones/(.+)', r'\\1', r.text)\n else:\n return ''", "def zone(self) -> str:\n return pulumi.get(self, \"zone\")", "def zone(self) -> str:\n return pulumi.get(self, \"zone\")", "def ex_view_zone(self, domain, name_server):\n params = {\"DOMAIN\": domain, \"NS\": name_server}\n action = \"/api_dns_viewzone.asp\"\n if self.reseller_id is not None:\n action = \"/api_dns_viewzone_reseller.asp\"\n response = self.connection.request(action, params=params)\n return response.object", "def zone(self):\n return self._zone", "def zone(self) -> str:\n return self._zone", "def availability_zone(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"availability_zone\")", "def node_show(self, node):\n if node.instance_uuid:\n n = self.ironic_client.node.get_by_instance_uuid(\n node.instance_uuid)\n else:\n n = self.ironic_client.node.get(node.uuid)\n return n", "def __zone_object_address(self):\n\t\treturn self.__c_version.object_address()", "def sc_dns_zone(self):\n return self._sc_dns_zone", "def get_zone(self, conn, host):\n fl = 'name=\"%s\"' % host\n request = conn.instances().aggregatedList(project=PROJECT, filter=fl)\n \twhile request is not None:\n \t\tresponse = request.execute()\n \t\tzones = response.get('items', {})\n \t\tfor zone in zones.values():\n \t\t\tfor inst in zone.get('instances', []):\n \t\t\t\tif inst['name'] == host:\n \t\t\t\t\treturn inst['zone'].split(\"/\")[-1]\n \t\trequest = conn.instances().aggregatedList_next(previous_request=request, previous_response=response)\n \traise Exception(\"Unable to determin the zone for instance %s\" % (host))", "def listInstancesRegionZone(region,zone):\n\tprint \"-\"*80\n\tprint \"# Region :\",region,\" Zone\", zone\t\n\tprint \"-\"*80\n\tinstances = getInstancesRegionZone(region,zone)\n\tif instances:\n\t\tfor instance in instances:\n\t\t\tprint \"[\",instance.ami_launch_index,\"]\",instance.ip_address,\" (\",instance.private_ip_address,\") \",instance.instance_type,\" key=\",instance.key_name", "def compute_zones(self):\n path = '/os-availability-zone/detail'\n res = self.compute.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('Get openstack availability zone: %s' % truncate(res))\n return res[0]['availabilityZoneInfo']", "def zone_name(self):\n return self._zone_name", "def show(self, req, id):\n zone_id = int(id)\n zone = api.zone_get(req.environ['nova.context'], zone_id)\n return dict(zone=_scrub_zone(zone))", "def access_zone(self):\n return self._access_zone", "def local_zone():\n return get_localzone()", "def get_zone(zone=\"\", vsys=\"1\"):\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": (\n \"/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys{}']/\"\n \"zone/entry[@name='{}']\".format(vsys, zone)\n ),\n }\n\n return __proxy__[\"panos.call\"](query)", "def availability_zone(self) -> str:\n return pulumi.get(self, \"availability_zone\")", "def get_zone(self):\n return self.project.get_flow().get_zone_of_object(self)", "def secondary_gce_zone(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"secondary_gce_zone\")", "def zone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"zone\")", "def zone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"zone\")", "def get_current_zone() -> Zone:\n return services.current_zone()", "def 
SecurityZone(self) -> _n_6_t_7:", "def SecurityZone(self) -> _n_6_t_7:", "def do_zone_event(client, args):\n args.type = 'zone'\n do_event_show(client, args)", "def Get(self, name, zone):\n project = properties.VALUES.core.project.Get(required=True)\n fully_qualified_node_name_ref = resources.REGISTRY.Parse(\n name,\n params={\n 'locationsId': zone,\n 'projectsId': project\n },\n collection='tpu.projects.locations.nodes',\n )\n request = self.messages.TpuProjectsLocationsNodesGetRequest(\n name=fully_qualified_node_name_ref.RelativeName())\n return self.client.projects_locations_nodes.Get(request)", "def zone_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"zone_id\")", "def zone_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"zone_id\")", "def zone_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"zone_id\")", "def zone_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"zone_id\")", "def secondary_gce_zone(self) -> str:\n return pulumi.get(self, \"secondary_gce_zone\")", "def zone_id(self) -> str:\n return pulumi.get(self, \"zone_id\")", "def zone_id(self) -> str:\n return pulumi.get(self, \"zone_id\")", "def zone_id(self) -> str:\n return pulumi.get(self, \"zone_id\")", "def zone_id(self) -> str:\n return pulumi.get(self, \"zone_id\")", "def zone_id(self) -> str:\n return pulumi.get(self, \"zone_id\")", "def availability_zone(self) -> pulumi.Output[str]:\n warnings.warn(\"\"\"Field 'availability_zone' has been deprecated from version 1.187.0. Use 'zone_id' instead.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"availability_zone is deprecated: Field 'availability_zone' has been deprecated from version 1.187.0. Use 'zone_id' instead.\"\"\")\n\n return pulumi.get(self, \"availability_zone\")", "def default_zone(self, region):\n if region == 'us-east-1':\n return region + 'b'\n else:\n return region + 'a'", "def get_region(zone):\n return '-'.join(zone.split('-')[:2])", "def get_zone(self, kwargs):\n dns_zone = kwargs[\"dns_zone\"]\n try:\n results = self.engine.query(\n self.engine.ZONE_FILTER(),\n base=','.join([f\"DC={dns_zone}\", \"CN=MicrosoftDNS,DC=DomainDNSZones\", self.engine.base_dn])\n )\n except LdapActiveDirectoryView.ActiveDirectoryLdapException as e:\n error(e)\n else:\n self.display(results)", "def availability_zone(self) -> Optional[str]:\n return pulumi.get(self, \"availability_zone\")", "def test_get_zone_from_account(self):\n account = Account('test-account')\n zone = Zone('test.example.com')\n account.add_zone(zone)\n self.assertEqual(account.get_zone('test.example.com'), zone)", "def availability_zone(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"availability_zone\")", "def zone_id(self) -> Optional[str]:\n return pulumi.get(self, \"zone_id\")", "def snapshot(self):\n self._snapshot = self._monoprice.zone_status(self._zone_id)", "def test_show_config_node(self):\n config_node = self._create_config_node()\n config_node_uuid = config_node['config-node']['uuid']\n with self.override_role():\n self.config_client.show_config_node(config_node_uuid)", "def get_zone_connection(self):\n return self.m_connection.zones", "def describe_role_zone_info(\n self,\n request: dds_20151201_models.DescribeRoleZoneInfoRequest,\n ) -> dds_20151201_models.DescribeRoleZoneInfoResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_role_zone_info_with_options(request, runtime)", "def show_instance(name, call=None):\n if call != \"action\":\n raise SaltCloudSystemExit(\n \"The show_instance action must be called with -a or 
--action.\"\n )\n\n nodes = list_nodes_full()\n __utils__[\"cloud.cache_node\"](nodes[name], _get_active_provider_name(), __opts__)\n return nodes[name]", "def get_single_zone(self, region):\n # TODO: Implement zones list\n # Hardcoded to us-west2\n z = {\n \"us-west2\": ['us-west2-a', 'us-west2-b', 'us-west2-c'],\n \"us-west1\": ['us-west1-a', 'us-west1-b', 'us-west1-c']\n }\n return z[region][:1]", "def get_zone_id(ctx, param, zone_name):\n del ctx #unused\n del param #unused\n cf = CloudFlare.CloudFlare()\n zones = cf.zones.get(params={'name': zone_name})\n if len(zones) != 1:\n raise click.ClickException('Invalid zone name: {}'.format(zone_name))\n return (zones[0]['id'], zones[0]['name'])", "def getNodeAddress(self, node):\n if self.role == Roles.ACTIVE or self.role == Roles.PASSIVE:\n return Backend().configuration.getAddress(node)\n else:\n if node == \"system-manager\":\n return self._systemManagerAddress.value\n elif node == self.node:\n return self.address\n else:\n return None", "def getZoneId(self):\n return self.zoneId", "def getNodeAddress(self, node):\n return self.backend.configuration.getAddress(node)", "def getZoneId(self):\n #self.zoneId\n if self.zoneId:\n return self.zoneId\n else:\n self.notify.warning(\"no zone id available\")", "def get_zone(self):\n to_return = None\n try:\n to_return = self.ns1.loadZone(self.module.params.get('zone'))\n except ResourceException as re:\n if re.response.code == 404:\n if (\n self.module.params.get('ignore_missing_zone')\n and self.module.params.get('state') == \"absent\"\n ):\n # zone not found but we are in the absent state\n # and the user doesn't care that the zone doesn't exist\n # nothing to do and no change\n self.module.exit_json(changed=False)\n else:\n # generic error or user cares about missing zone\n self.module.fail_json(\n msg=\"error code %s - %s \" % (re.response.code, re.message)\n )\n return to_return", "def detail_cluster(cluster_name, znode):\n\n _cluster_info = dict()\n _cluster_info.update(app.clusters[cluster_name].__dict__)\n _cluster_info.pop(\"auth_data\", None)\n _cluster_info[\"connection\"] = app.managers[cluster_name]._client.state\n resp = Response(json.dumps(_cluster_info),\n status=200,\n mimetype=\"application/json\")\n return resp", "def ValidateZone(args):\n args.zone = args.zone or properties.VALUES.compute.zone.Get(required=True)", "def get_instance(self, node_id: str) -> \"GCPNode\":\n return", "def zoneadm():\n\n ret = run_cmd('/usr/sbin/zoneadm list -pc')\n\n if isinstance(ret, basestring):\n return [ret]\n else:\n return ret", "def get_current_zone_id() -> int:\n return services.current_zone_id()", "def dump_zone(self, zone_id):\r\n return self.service.getZoneFileContents(id=zone_id)", "def get_node_ip(self):\n return ray.services.get_node_ip_address()", "def get_node_ip(\n self,\n name,\n ):\n pass", "def get_zone(self, zone_id, records=True):\r\n mask = None\r\n if records:\r\n mask = 'resourceRecords'\r\n return self.service.getObject(id=zone_id, mask=mask)", "def list_zones(self):\n\n return [zone[\"zone\"] for zone in list(self._zones.values())]", "def sec_zone(module):\n margs = module.params\n\n endpoint = 'blueprints/{}/security-zones'.format(margs['blueprint_id'])\n\n name = margs.get('name', None)\n uuid = margs.get('id', None)\n vni_id = margs.get('vni_id', None)\n vlan_id = margs.get('vlan_id', None)\n\n if vni_id:\n try:\n vni_id = int(vni_id)\n except ValueError:\n module.fail_json(msg=\"Invalid ID: must be an integer\")\n\n errors = validate_vni_id(vni_id)\n\n if errors:\n 
module.fail_json(msg=errors)\n\n if vlan_id:\n try:\n vlan_id = int(vlan_id)\n except ValueError:\n module.fail_json(msg=\"Invalid ID: must be an integer\")\n\n errors = validate_vlan_id(vlan_id)\n\n if errors:\n module.fail_json(msg=errors)\n\n sz_data = aos_get(margs['session'], endpoint)\n my_sz = {}\n\n if not uuid:\n\n for k, v in sz_data['items'].items():\n if v['label'] == name:\n my_sz = v\n else:\n\n for k, v in sz_data['items'].items():\n if v['id'] == uuid:\n my_sz = v\n\n if margs['state'] == 'absent':\n success, changed, results = sec_zone_absent(module, margs['session'],\n endpoint, my_sz)\n\n elif margs['state'] == 'present':\n success, changed, results = sec_zone_present(module, margs['session'],\n endpoint, my_sz, vni_id,\n vlan_id)\n\n if success:\n module.exit_json(changed=changed, name=results['label'],\n id=results['id'], value=results)\n else:\n module.fail_json(msg=results)", "def smart_zone(self):\n if self._vsanobj.id is None:\n raise VsanNotPresent(\"Vsan \" + str(self._vsanobj._id) + \" is not present on the switch.\")\n out = self.__show_zone_status()\n return out[get_key(zonekeys.SMART_ZONE, self._SW_VER)]", "def zones(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"zones\")", "def availability_zone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"availability_zone\")", "def availability_zone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"availability_zone\")", "def get_address(project, zone, instance):\n return gcloud(\n project,\n 'addresses',\n 'describe',\n '%s-ip' % instance,\n '--region=%s' % get_region(zone),\n '--format=value(address)',\n )", "def setNodeTimeZone(self,node,timezone):\n post_data = {'timezone': str(timezone)}\n data = self.connect('put',\"nodes/%s/time\" % (node), post_data)\n return data", "def get_zones(self, context):\n # handling zones method in RPC\n response = self.dns_manager.get_zones(context)\n return response", "def zones(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"zones\")", "def zones(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"zones\")", "def hosted_zone_by_id(client: boto3.client, zone_id: str) -> AWSResponse:\n try:\n return client.get_hosted_zone(Id=zone_id)\n except ClientError as e:\n logger.exception(e.response['Error']['Message'])\n return {'HostedZone': []}", "def zone_master(self) -> None:\n for zone in self.coordinator.data.zones:\n if zone.MasterMode and zone.SharedRoomID == self.zone.SharedRoomID:\n return zone.ZoneID", "def get_region_db_detail(self, context, id):\n zone_obj = self.dns_manager.get_region_db_detail(context, id)\n return zone_obj", "def instance_view(self) -> 'outputs.DedicatedHostInstanceViewResponse':\n return pulumi.get(self, \"instance_view\")", "def getTaskZoneId(self):\n return self.getZoneId()", "def get_zones(self, latitude, longitude):\n result = self.__request(\n \"GET\",\n \"https://api.voiapp.io/v1/zones?lat={}&lng={}\".format(latitude, longitude),\n )\n if result and \"zones\" in result:\n return result[\"zones\"]", "def show_node(self):\n if self.controller.node_id:\n self.print_object(\n 'node',\n ('uid', 'status', 'roles'),\n self.controller.get_node()\n )\n else:\n print(\"Please select node at first.\")", "def get_zones_output(project: Optional[pulumi.Input[Optional[str]]] = None,\n region: Optional[pulumi.Input[Optional[str]]] = None,\n status: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> 
pulumi.Output[GetZonesResult]:\n ...", "def test_openshift_node_with_cluster_access_view(self):\n with schema_context(self.schema_name):\n expected = (\n OCPCostSummaryByNodeP.objects.annotate(**{\"value\": F(\"node\")})\n .values(\"value\")\n .distinct()\n .filter(cluster_id__in=[\"OCP-on-AWS\"])\n .count()\n )\n self.assertTrue(expected)\n url = reverse(\"openshift-nodes\")\n response = self.client.get(url, **self.headers)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n json_result = response.json()\n self.assertIsNotNone(json_result.get(\"data\"))\n self.assertIsInstance(json_result.get(\"data\"), list)\n self.assertEqual(len(json_result.get(\"data\")), expected)", "def list_zone_names():\n get_name = lambda a: a.get_name()\n return map(get_name, list_zones())", "def get_zones(vsys=\"1\"):\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": (\n \"/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys{}']/\"\n \"zone\".format(vsys)\n ),\n }\n\n return __proxy__[\"panos.call\"](query)", "def default_zone(self):\n if self._vsanobj.id is None:\n raise VsanNotPresent(\"Vsan \" + str(self._vsanobj._id) + \" is not present on the switch.\")\n out = self.__show_zone_status()\n return out[get_key(zonekeys.DEFAULT_ZONE, self._SW_VER)]", "def time_zone(self) -> str:\n return pulumi.get(self, \"time_zone\")", "def availability_zone(self) -> Optional[pulumi.Input[str]]:\n warnings.warn(\"\"\"Field 'availability_zone' has been deprecated from version 1.187.0. Use 'zone_id' instead.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"availability_zone is deprecated: Field 'availability_zone' has been deprecated from version 1.187.0. Use 'zone_id' instead.\"\"\")\n\n return pulumi.get(self, \"availability_zone\")", "def availability_zone(self) -> Optional[pulumi.Input[str]]:\n warnings.warn(\"\"\"Field 'availability_zone' has been deprecated from version 1.187.0. Use 'zone_id' instead.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"availability_zone is deprecated: Field 'availability_zone' has been deprecated from version 1.187.0. Use 'zone_id' instead.\"\"\")\n\n return pulumi.get(self, \"availability_zone\")", "async def describe_role_zone_info_async(\n self,\n request: dds_20151201_models.DescribeRoleZoneInfoRequest,\n ) -> dds_20151201_models.DescribeRoleZoneInfoResponse:\n runtime = util_models.RuntimeOptions()\n return await self.describe_role_zone_info_with_options_async(request, runtime)", "def get_zone_db_details(self, context, id):\n zone_obj = self.dns_manager.get_zone_db_details(context, id)\n return zone_obj", "def zone_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"zone_id\")", "def zone_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"zone_id\")", "def show_cluster(name: str) -> Cluster:\n environment = EnvironmentProvider().environment\n return environment.clusters[name]", "def secondary_gce_zone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"secondary_gce_zone\")", "def list_zones(self, kwargs):\n verbose = kwargs.get(\"verbose\", False)\n\n if not verbose:\n attributes = [\"dc\", \"objectClass\"]\n else:\n attributes = ALL\n\n self.display(\n self.engine.query(\n self.engine.ZONES_FILTER(),\n attributes, base=','.join([\"CN=MicrosoftDNS,DC=DomainDNSZones\", self.engine.base_dn])\n ),\n verbose\n )", "def test_zone_repr(self):\n zone = Zone('example.com')\n self.assertEqual(f'{zone}', 'Zone<example.com>')" ]
[ "0.6436603", "0.61140853", "0.61140853", "0.6104373", "0.60225934", "0.5837868", "0.57854277", "0.5775233", "0.5758423", "0.575697", "0.5726742", "0.5717086", "0.5708644", "0.5697726", "0.56893647", "0.5682885", "0.5647697", "0.5601878", "0.55982333", "0.5585867", "0.55534434", "0.5550864", "0.5550864", "0.5538219", "0.5502469", "0.5502469", "0.5490892", "0.5476695", "0.54360735", "0.54360735", "0.54360735", "0.54360735", "0.54286563", "0.54142606", "0.54142606", "0.54142606", "0.54142606", "0.54142606", "0.53942", "0.53587186", "0.53558004", "0.53073406", "0.52948445", "0.526071", "0.5256926", "0.5252223", "0.52334285", "0.5231368", "0.52090317", "0.5199382", "0.5198366", "0.51365614", "0.5135563", "0.51238966", "0.51202184", "0.5115897", "0.5114413", "0.5106094", "0.51046395", "0.5090261", "0.5078891", "0.50716424", "0.50647193", "0.5061596", "0.50399673", "0.5038904", "0.5037864", "0.5022581", "0.501279", "0.5012644", "0.50116557", "0.50112754", "0.50112754", "0.5009906", "0.4999363", "0.49782416", "0.49748358", "0.49748358", "0.49705502", "0.49652004", "0.49578074", "0.49555537", "0.4949503", "0.49487668", "0.49461904", "0.49454615", "0.4944212", "0.4943282", "0.49176267", "0.4906959", "0.4900278", "0.48998973", "0.48998973", "0.4895487", "0.48849136", "0.48742825", "0.48742825", "0.48735833", "0.48733598", "0.4870302", "0.48663232" ]
0.0
-1
> For more information, see [View the zone of a node](~~123825~~). This operation is applicable only to replica set and sharded cluster instances, but not to standalone instances.
async def describe_role_zone_info_with_options_async( self, request: dds_20151201_models.DescribeRoleZoneInfoRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.DescribeRoleZoneInfoResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='DescribeRoleZoneInfo', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.DescribeRoleZoneInfoResponse(), await self.call_api_async(params, req, runtime) )
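The asynchronous variant above can be driven from an asyncio event loop in the same way; again a hedged sketch in which the endpoint, credentials, and instance ID are placeholders rather than values from the source.

import asyncio
import os

from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_dds20151201.client import Client as DdsClient
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util import models as util_models


async def main() -> None:
    config = open_api_models.Config(
        access_key_id=os.environ['ALIBABA_CLOUD_ACCESS_KEY_ID'],
        access_key_secret=os.environ['ALIBABA_CLOUD_ACCESS_KEY_SECRET'],
        endpoint='mongodb.aliyuncs.com',  # assumed placeholder endpoint
    )
    client = DdsClient(config)
    request = dds_20151201_models.DescribeRoleZoneInfoRequest(dbinstance_id='dds-bpxxxxxxxxxxxxx')
    response = await client.describe_role_zone_info_with_options_async(
        request, util_models.RuntimeOptions()
    )
    print(response.body)


if __name__ == '__main__':
    asyncio.run(main())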
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_zone():\n r = get('http://metadata.google.internal/'\n 'computeMetadata/v1/instance/zone',\n headers={'Metadata-Flavor': 'Google'})\n if r.status_code == 200:\n return sub(r'.+zones/(.+)', r'\\1', r.text)\n else:\n return ''", "def zone(self) -> str:\n return pulumi.get(self, \"zone\")", "def zone(self) -> str:\n return pulumi.get(self, \"zone\")", "def ex_view_zone(self, domain, name_server):\n params = {\"DOMAIN\": domain, \"NS\": name_server}\n action = \"/api_dns_viewzone.asp\"\n if self.reseller_id is not None:\n action = \"/api_dns_viewzone_reseller.asp\"\n response = self.connection.request(action, params=params)\n return response.object", "def zone(self):\n return self._zone", "def zone(self) -> str:\n return self._zone", "def availability_zone(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"availability_zone\")", "def node_show(self, node):\n if node.instance_uuid:\n n = self.ironic_client.node.get_by_instance_uuid(\n node.instance_uuid)\n else:\n n = self.ironic_client.node.get(node.uuid)\n return n", "def __zone_object_address(self):\n\t\treturn self.__c_version.object_address()", "def sc_dns_zone(self):\n return self._sc_dns_zone", "def get_zone(self, conn, host):\n fl = 'name=\"%s\"' % host\n request = conn.instances().aggregatedList(project=PROJECT, filter=fl)\n \twhile request is not None:\n \t\tresponse = request.execute()\n \t\tzones = response.get('items', {})\n \t\tfor zone in zones.values():\n \t\t\tfor inst in zone.get('instances', []):\n \t\t\t\tif inst['name'] == host:\n \t\t\t\t\treturn inst['zone'].split(\"/\")[-1]\n \t\trequest = conn.instances().aggregatedList_next(previous_request=request, previous_response=response)\n \traise Exception(\"Unable to determin the zone for instance %s\" % (host))", "def listInstancesRegionZone(region,zone):\n\tprint \"-\"*80\n\tprint \"# Region :\",region,\" Zone\", zone\t\n\tprint \"-\"*80\n\tinstances = getInstancesRegionZone(region,zone)\n\tif instances:\n\t\tfor instance in instances:\n\t\t\tprint \"[\",instance.ami_launch_index,\"]\",instance.ip_address,\" (\",instance.private_ip_address,\") \",instance.instance_type,\" key=\",instance.key_name", "def compute_zones(self):\n path = '/os-availability-zone/detail'\n res = self.compute.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('Get openstack availability zone: %s' % truncate(res))\n return res[0]['availabilityZoneInfo']", "def zone_name(self):\n return self._zone_name", "def show(self, req, id):\n zone_id = int(id)\n zone = api.zone_get(req.environ['nova.context'], zone_id)\n return dict(zone=_scrub_zone(zone))", "def access_zone(self):\n return self._access_zone", "def local_zone():\n return get_localzone()", "def get_zone(zone=\"\", vsys=\"1\"):\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": (\n \"/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys{}']/\"\n \"zone/entry[@name='{}']\".format(vsys, zone)\n ),\n }\n\n return __proxy__[\"panos.call\"](query)", "def availability_zone(self) -> str:\n return pulumi.get(self, \"availability_zone\")", "def get_zone(self):\n return self.project.get_flow().get_zone_of_object(self)", "def secondary_gce_zone(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"secondary_gce_zone\")", "def zone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"zone\")", "def zone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"zone\")", "def get_current_zone() -> Zone:\n return services.current_zone()", "def 
SecurityZone(self) -> _n_6_t_7:", "def SecurityZone(self) -> _n_6_t_7:", "def do_zone_event(client, args):\n args.type = 'zone'\n do_event_show(client, args)", "def Get(self, name, zone):\n project = properties.VALUES.core.project.Get(required=True)\n fully_qualified_node_name_ref = resources.REGISTRY.Parse(\n name,\n params={\n 'locationsId': zone,\n 'projectsId': project\n },\n collection='tpu.projects.locations.nodes',\n )\n request = self.messages.TpuProjectsLocationsNodesGetRequest(\n name=fully_qualified_node_name_ref.RelativeName())\n return self.client.projects_locations_nodes.Get(request)", "def zone_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"zone_id\")", "def zone_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"zone_id\")", "def zone_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"zone_id\")", "def zone_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"zone_id\")", "def secondary_gce_zone(self) -> str:\n return pulumi.get(self, \"secondary_gce_zone\")", "def zone_id(self) -> str:\n return pulumi.get(self, \"zone_id\")", "def zone_id(self) -> str:\n return pulumi.get(self, \"zone_id\")", "def zone_id(self) -> str:\n return pulumi.get(self, \"zone_id\")", "def zone_id(self) -> str:\n return pulumi.get(self, \"zone_id\")", "def zone_id(self) -> str:\n return pulumi.get(self, \"zone_id\")", "def availability_zone(self) -> pulumi.Output[str]:\n warnings.warn(\"\"\"Field 'availability_zone' has been deprecated from version 1.187.0. Use 'zone_id' instead.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"availability_zone is deprecated: Field 'availability_zone' has been deprecated from version 1.187.0. Use 'zone_id' instead.\"\"\")\n\n return pulumi.get(self, \"availability_zone\")", "def default_zone(self, region):\n if region == 'us-east-1':\n return region + 'b'\n else:\n return region + 'a'", "def get_region(zone):\n return '-'.join(zone.split('-')[:2])", "def get_zone(self, kwargs):\n dns_zone = kwargs[\"dns_zone\"]\n try:\n results = self.engine.query(\n self.engine.ZONE_FILTER(),\n base=','.join([f\"DC={dns_zone}\", \"CN=MicrosoftDNS,DC=DomainDNSZones\", self.engine.base_dn])\n )\n except LdapActiveDirectoryView.ActiveDirectoryLdapException as e:\n error(e)\n else:\n self.display(results)", "def availability_zone(self) -> Optional[str]:\n return pulumi.get(self, \"availability_zone\")", "def test_get_zone_from_account(self):\n account = Account('test-account')\n zone = Zone('test.example.com')\n account.add_zone(zone)\n self.assertEqual(account.get_zone('test.example.com'), zone)", "def availability_zone(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"availability_zone\")", "def zone_id(self) -> Optional[str]:\n return pulumi.get(self, \"zone_id\")", "def snapshot(self):\n self._snapshot = self._monoprice.zone_status(self._zone_id)", "def test_show_config_node(self):\n config_node = self._create_config_node()\n config_node_uuid = config_node['config-node']['uuid']\n with self.override_role():\n self.config_client.show_config_node(config_node_uuid)", "def get_zone_connection(self):\n return self.m_connection.zones", "def describe_role_zone_info(\n self,\n request: dds_20151201_models.DescribeRoleZoneInfoRequest,\n ) -> dds_20151201_models.DescribeRoleZoneInfoResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_role_zone_info_with_options(request, runtime)", "def show_instance(name, call=None):\n if call != \"action\":\n raise SaltCloudSystemExit(\n \"The show_instance action must be called with -a or 
--action.\"\n )\n\n nodes = list_nodes_full()\n __utils__[\"cloud.cache_node\"](nodes[name], _get_active_provider_name(), __opts__)\n return nodes[name]", "def get_zone_id(ctx, param, zone_name):\n del ctx #unused\n del param #unused\n cf = CloudFlare.CloudFlare()\n zones = cf.zones.get(params={'name': zone_name})\n if len(zones) != 1:\n raise click.ClickException('Invalid zone name: {}'.format(zone_name))\n return (zones[0]['id'], zones[0]['name'])", "def get_single_zone(self, region):\n # TODO: Implement zones list\n # Hardcoded to us-west2\n z = {\n \"us-west2\": ['us-west2-a', 'us-west2-b', 'us-west2-c'],\n \"us-west1\": ['us-west1-a', 'us-west1-b', 'us-west1-c']\n }\n return z[region][:1]", "def getNodeAddress(self, node):\n if self.role == Roles.ACTIVE or self.role == Roles.PASSIVE:\n return Backend().configuration.getAddress(node)\n else:\n if node == \"system-manager\":\n return self._systemManagerAddress.value\n elif node == self.node:\n return self.address\n else:\n return None", "def getZoneId(self):\n return self.zoneId", "def getZoneId(self):\n #self.zoneId\n if self.zoneId:\n return self.zoneId\n else:\n self.notify.warning(\"no zone id available\")", "def getNodeAddress(self, node):\n return self.backend.configuration.getAddress(node)", "def get_zone(self):\n to_return = None\n try:\n to_return = self.ns1.loadZone(self.module.params.get('zone'))\n except ResourceException as re:\n if re.response.code == 404:\n if (\n self.module.params.get('ignore_missing_zone')\n and self.module.params.get('state') == \"absent\"\n ):\n # zone not found but we are in the absent state\n # and the user doesn't care that the zone doesn't exist\n # nothing to do and no change\n self.module.exit_json(changed=False)\n else:\n # generic error or user cares about missing zone\n self.module.fail_json(\n msg=\"error code %s - %s \" % (re.response.code, re.message)\n )\n return to_return", "def detail_cluster(cluster_name, znode):\n\n _cluster_info = dict()\n _cluster_info.update(app.clusters[cluster_name].__dict__)\n _cluster_info.pop(\"auth_data\", None)\n _cluster_info[\"connection\"] = app.managers[cluster_name]._client.state\n resp = Response(json.dumps(_cluster_info),\n status=200,\n mimetype=\"application/json\")\n return resp", "def ValidateZone(args):\n args.zone = args.zone or properties.VALUES.compute.zone.Get(required=True)", "def get_instance(self, node_id: str) -> \"GCPNode\":\n return", "def zoneadm():\n\n ret = run_cmd('/usr/sbin/zoneadm list -pc')\n\n if isinstance(ret, basestring):\n return [ret]\n else:\n return ret", "def get_current_zone_id() -> int:\n return services.current_zone_id()", "def dump_zone(self, zone_id):\r\n return self.service.getZoneFileContents(id=zone_id)", "def get_zone(self, zone_id, records=True):\r\n mask = None\r\n if records:\r\n mask = 'resourceRecords'\r\n return self.service.getObject(id=zone_id, mask=mask)", "def get_node_ip(self):\n return ray.services.get_node_ip_address()", "def get_node_ip(\n self,\n name,\n ):\n pass", "def list_zones(self):\n\n return [zone[\"zone\"] for zone in list(self._zones.values())]", "def smart_zone(self):\n if self._vsanobj.id is None:\n raise VsanNotPresent(\"Vsan \" + str(self._vsanobj._id) + \" is not present on the switch.\")\n out = self.__show_zone_status()\n return out[get_key(zonekeys.SMART_ZONE, self._SW_VER)]", "def sec_zone(module):\n margs = module.params\n\n endpoint = 'blueprints/{}/security-zones'.format(margs['blueprint_id'])\n\n name = margs.get('name', None)\n uuid = margs.get('id', None)\n vni_id = 
margs.get('vni_id', None)\n vlan_id = margs.get('vlan_id', None)\n\n if vni_id:\n try:\n vni_id = int(vni_id)\n except ValueError:\n module.fail_json(msg=\"Invalid ID: must be an integer\")\n\n errors = validate_vni_id(vni_id)\n\n if errors:\n module.fail_json(msg=errors)\n\n if vlan_id:\n try:\n vlan_id = int(vlan_id)\n except ValueError:\n module.fail_json(msg=\"Invalid ID: must be an integer\")\n\n errors = validate_vlan_id(vlan_id)\n\n if errors:\n module.fail_json(msg=errors)\n\n sz_data = aos_get(margs['session'], endpoint)\n my_sz = {}\n\n if not uuid:\n\n for k, v in sz_data['items'].items():\n if v['label'] == name:\n my_sz = v\n else:\n\n for k, v in sz_data['items'].items():\n if v['id'] == uuid:\n my_sz = v\n\n if margs['state'] == 'absent':\n success, changed, results = sec_zone_absent(module, margs['session'],\n endpoint, my_sz)\n\n elif margs['state'] == 'present':\n success, changed, results = sec_zone_present(module, margs['session'],\n endpoint, my_sz, vni_id,\n vlan_id)\n\n if success:\n module.exit_json(changed=changed, name=results['label'],\n id=results['id'], value=results)\n else:\n module.fail_json(msg=results)", "def availability_zone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"availability_zone\")", "def availability_zone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"availability_zone\")", "def zones(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"zones\")", "def get_address(project, zone, instance):\n return gcloud(\n project,\n 'addresses',\n 'describe',\n '%s-ip' % instance,\n '--region=%s' % get_region(zone),\n '--format=value(address)',\n )", "def setNodeTimeZone(self,node,timezone):\n post_data = {'timezone': str(timezone)}\n data = self.connect('put',\"nodes/%s/time\" % (node), post_data)\n return data", "def get_zones(self, context):\n # handling zones method in RPC\n response = self.dns_manager.get_zones(context)\n return response", "def zones(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"zones\")", "def zones(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"zones\")", "def hosted_zone_by_id(client: boto3.client, zone_id: str) -> AWSResponse:\n try:\n return client.get_hosted_zone(Id=zone_id)\n except ClientError as e:\n logger.exception(e.response['Error']['Message'])\n return {'HostedZone': []}", "def zone_master(self) -> None:\n for zone in self.coordinator.data.zones:\n if zone.MasterMode and zone.SharedRoomID == self.zone.SharedRoomID:\n return zone.ZoneID", "def get_region_db_detail(self, context, id):\n zone_obj = self.dns_manager.get_region_db_detail(context, id)\n return zone_obj", "def instance_view(self) -> 'outputs.DedicatedHostInstanceViewResponse':\n return pulumi.get(self, \"instance_view\")", "def getTaskZoneId(self):\n return self.getZoneId()", "def get_zones(self, latitude, longitude):\n result = self.__request(\n \"GET\",\n \"https://api.voiapp.io/v1/zones?lat={}&lng={}\".format(latitude, longitude),\n )\n if result and \"zones\" in result:\n return result[\"zones\"]", "def get_zones_output(project: Optional[pulumi.Input[Optional[str]]] = None,\n region: Optional[pulumi.Input[Optional[str]]] = None,\n status: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetZonesResult]:\n ...", "def list_zone_names():\n get_name = lambda a: a.get_name()\n return map(get_name, list_zones())", "def test_openshift_node_with_cluster_access_view(self):\n with 
schema_context(self.schema_name):\n expected = (\n OCPCostSummaryByNodeP.objects.annotate(**{\"value\": F(\"node\")})\n .values(\"value\")\n .distinct()\n .filter(cluster_id__in=[\"OCP-on-AWS\"])\n .count()\n )\n self.assertTrue(expected)\n url = reverse(\"openshift-nodes\")\n response = self.client.get(url, **self.headers)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n json_result = response.json()\n self.assertIsNotNone(json_result.get(\"data\"))\n self.assertIsInstance(json_result.get(\"data\"), list)\n self.assertEqual(len(json_result.get(\"data\")), expected)", "def show_node(self):\n if self.controller.node_id:\n self.print_object(\n 'node',\n ('uid', 'status', 'roles'),\n self.controller.get_node()\n )\n else:\n print(\"Please select node at first.\")", "def get_zones(vsys=\"1\"):\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": (\n \"/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys{}']/\"\n \"zone\".format(vsys)\n ),\n }\n\n return __proxy__[\"panos.call\"](query)", "def default_zone(self):\n if self._vsanobj.id is None:\n raise VsanNotPresent(\"Vsan \" + str(self._vsanobj._id) + \" is not present on the switch.\")\n out = self.__show_zone_status()\n return out[get_key(zonekeys.DEFAULT_ZONE, self._SW_VER)]", "def time_zone(self) -> str:\n return pulumi.get(self, \"time_zone\")", "def availability_zone(self) -> Optional[pulumi.Input[str]]:\n warnings.warn(\"\"\"Field 'availability_zone' has been deprecated from version 1.187.0. Use 'zone_id' instead.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"availability_zone is deprecated: Field 'availability_zone' has been deprecated from version 1.187.0. Use 'zone_id' instead.\"\"\")\n\n return pulumi.get(self, \"availability_zone\")", "def availability_zone(self) -> Optional[pulumi.Input[str]]:\n warnings.warn(\"\"\"Field 'availability_zone' has been deprecated from version 1.187.0. Use 'zone_id' instead.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"availability_zone is deprecated: Field 'availability_zone' has been deprecated from version 1.187.0. Use 'zone_id' instead.\"\"\")\n\n return pulumi.get(self, \"availability_zone\")", "async def describe_role_zone_info_async(\n self,\n request: dds_20151201_models.DescribeRoleZoneInfoRequest,\n ) -> dds_20151201_models.DescribeRoleZoneInfoResponse:\n runtime = util_models.RuntimeOptions()\n return await self.describe_role_zone_info_with_options_async(request, runtime)", "def get_zone_db_details(self, context, id):\n zone_obj = self.dns_manager.get_zone_db_details(context, id)\n return zone_obj", "def zone_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"zone_id\")", "def zone_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"zone_id\")", "def secondary_gce_zone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"secondary_gce_zone\")", "def list_zones(self, kwargs):\n verbose = kwargs.get(\"verbose\", False)\n\n if not verbose:\n attributes = [\"dc\", \"objectClass\"]\n else:\n attributes = ALL\n\n self.display(\n self.engine.query(\n self.engine.ZONES_FILTER(),\n attributes, base=','.join([\"CN=MicrosoftDNS,DC=DomainDNSZones\", self.engine.base_dn])\n ),\n verbose\n )", "def show_cluster(name: str) -> Cluster:\n environment = EnvironmentProvider().environment\n return environment.clusters[name]", "def test_zone_repr(self):\n zone = Zone('example.com')\n self.assertEqual(f'{zone}', 'Zone<example.com>')" ]
[ "0.64396524", "0.61155885", "0.61155885", "0.6105519", "0.6023958", "0.5839498", "0.5786897", "0.57711685", "0.57587624", "0.57580996", "0.57276356", "0.5715351", "0.5709695", "0.56995195", "0.5690427", "0.5684144", "0.5648645", "0.560363", "0.5599775", "0.5587867", "0.55546004", "0.5552469", "0.5552469", "0.5540242", "0.55020535", "0.55020535", "0.5491981", "0.5476239", "0.54371005", "0.54371005", "0.54371005", "0.54371005", "0.54300374", "0.54154855", "0.54154855", "0.54154855", "0.54154855", "0.54154855", "0.5395948", "0.5360283", "0.5357902", "0.5310607", "0.52966636", "0.52637136", "0.52585334", "0.52537984", "0.52331036", "0.52283335", "0.5210334", "0.5200934", "0.5194467", "0.5138307", "0.5137794", "0.5122374", "0.51216626", "0.5115908", "0.5114371", "0.5108328", "0.510351", "0.5092652", "0.5076363", "0.50728303", "0.5065594", "0.5063445", "0.50408894", "0.50366974", "0.50356805", "0.50245726", "0.5013482", "0.5013288", "0.5012945", "0.5012945", "0.501261", "0.5010483", "0.49998876", "0.4979913", "0.49759042", "0.49759042", "0.49732116", "0.49647275", "0.49588466", "0.49535137", "0.49507776", "0.49498823", "0.4947177", "0.4944998", "0.4941597", "0.4941419", "0.49182984", "0.49070328", "0.49023563", "0.49017185", "0.49017185", "0.48967624", "0.48868686", "0.4875392", "0.4875392", "0.48747182", "0.48718223", "0.4871074", "0.48680803" ]
0.0
-1
> For more information, see [View the zone of a node](~~123825~~). This operation is applicable only to replica set and sharded cluster instances, but not to standalone instances.
def describe_role_zone_info( self, request: dds_20151201_models.DescribeRoleZoneInfoRequest, ) -> dds_20151201_models.DescribeRoleZoneInfoResponse: runtime = util_models.RuntimeOptions() return self.describe_role_zone_info_with_options(request, runtime)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_zone():\n r = get('http://metadata.google.internal/'\n 'computeMetadata/v1/instance/zone',\n headers={'Metadata-Flavor': 'Google'})\n if r.status_code == 200:\n return sub(r'.+zones/(.+)', r'\\1', r.text)\n else:\n return ''", "def zone(self) -> str:\n return pulumi.get(self, \"zone\")", "def zone(self) -> str:\n return pulumi.get(self, \"zone\")", "def ex_view_zone(self, domain, name_server):\n params = {\"DOMAIN\": domain, \"NS\": name_server}\n action = \"/api_dns_viewzone.asp\"\n if self.reseller_id is not None:\n action = \"/api_dns_viewzone_reseller.asp\"\n response = self.connection.request(action, params=params)\n return response.object", "def zone(self):\n return self._zone", "def zone(self) -> str:\n return self._zone", "def availability_zone(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"availability_zone\")", "def node_show(self, node):\n if node.instance_uuid:\n n = self.ironic_client.node.get_by_instance_uuid(\n node.instance_uuid)\n else:\n n = self.ironic_client.node.get(node.uuid)\n return n", "def __zone_object_address(self):\n\t\treturn self.__c_version.object_address()", "def sc_dns_zone(self):\n return self._sc_dns_zone", "def get_zone(self, conn, host):\n fl = 'name=\"%s\"' % host\n request = conn.instances().aggregatedList(project=PROJECT, filter=fl)\n \twhile request is not None:\n \t\tresponse = request.execute()\n \t\tzones = response.get('items', {})\n \t\tfor zone in zones.values():\n \t\t\tfor inst in zone.get('instances', []):\n \t\t\t\tif inst['name'] == host:\n \t\t\t\t\treturn inst['zone'].split(\"/\")[-1]\n \t\trequest = conn.instances().aggregatedList_next(previous_request=request, previous_response=response)\n \traise Exception(\"Unable to determin the zone for instance %s\" % (host))", "def listInstancesRegionZone(region,zone):\n\tprint \"-\"*80\n\tprint \"# Region :\",region,\" Zone\", zone\t\n\tprint \"-\"*80\n\tinstances = getInstancesRegionZone(region,zone)\n\tif instances:\n\t\tfor instance in instances:\n\t\t\tprint \"[\",instance.ami_launch_index,\"]\",instance.ip_address,\" (\",instance.private_ip_address,\") \",instance.instance_type,\" key=\",instance.key_name", "def compute_zones(self):\n path = '/os-availability-zone/detail'\n res = self.compute.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('Get openstack availability zone: %s' % truncate(res))\n return res[0]['availabilityZoneInfo']", "def zone_name(self):\n return self._zone_name", "def show(self, req, id):\n zone_id = int(id)\n zone = api.zone_get(req.environ['nova.context'], zone_id)\n return dict(zone=_scrub_zone(zone))", "def access_zone(self):\n return self._access_zone", "def local_zone():\n return get_localzone()", "def get_zone(zone=\"\", vsys=\"1\"):\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": (\n \"/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys{}']/\"\n \"zone/entry[@name='{}']\".format(vsys, zone)\n ),\n }\n\n return __proxy__[\"panos.call\"](query)", "def availability_zone(self) -> str:\n return pulumi.get(self, \"availability_zone\")", "def get_zone(self):\n return self.project.get_flow().get_zone_of_object(self)", "def secondary_gce_zone(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"secondary_gce_zone\")", "def zone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"zone\")", "def zone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"zone\")", "def get_current_zone() -> Zone:\n return services.current_zone()", "def 
SecurityZone(self) -> _n_6_t_7:", "def SecurityZone(self) -> _n_6_t_7:", "def do_zone_event(client, args):\n args.type = 'zone'\n do_event_show(client, args)", "def Get(self, name, zone):\n project = properties.VALUES.core.project.Get(required=True)\n fully_qualified_node_name_ref = resources.REGISTRY.Parse(\n name,\n params={\n 'locationsId': zone,\n 'projectsId': project\n },\n collection='tpu.projects.locations.nodes',\n )\n request = self.messages.TpuProjectsLocationsNodesGetRequest(\n name=fully_qualified_node_name_ref.RelativeName())\n return self.client.projects_locations_nodes.Get(request)", "def zone_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"zone_id\")", "def zone_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"zone_id\")", "def zone_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"zone_id\")", "def zone_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"zone_id\")", "def secondary_gce_zone(self) -> str:\n return pulumi.get(self, \"secondary_gce_zone\")", "def zone_id(self) -> str:\n return pulumi.get(self, \"zone_id\")", "def zone_id(self) -> str:\n return pulumi.get(self, \"zone_id\")", "def zone_id(self) -> str:\n return pulumi.get(self, \"zone_id\")", "def zone_id(self) -> str:\n return pulumi.get(self, \"zone_id\")", "def zone_id(self) -> str:\n return pulumi.get(self, \"zone_id\")", "def availability_zone(self) -> pulumi.Output[str]:\n warnings.warn(\"\"\"Field 'availability_zone' has been deprecated from version 1.187.0. Use 'zone_id' instead.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"availability_zone is deprecated: Field 'availability_zone' has been deprecated from version 1.187.0. Use 'zone_id' instead.\"\"\")\n\n return pulumi.get(self, \"availability_zone\")", "def default_zone(self, region):\n if region == 'us-east-1':\n return region + 'b'\n else:\n return region + 'a'", "def get_region(zone):\n return '-'.join(zone.split('-')[:2])", "def get_zone(self, kwargs):\n dns_zone = kwargs[\"dns_zone\"]\n try:\n results = self.engine.query(\n self.engine.ZONE_FILTER(),\n base=','.join([f\"DC={dns_zone}\", \"CN=MicrosoftDNS,DC=DomainDNSZones\", self.engine.base_dn])\n )\n except LdapActiveDirectoryView.ActiveDirectoryLdapException as e:\n error(e)\n else:\n self.display(results)", "def availability_zone(self) -> Optional[str]:\n return pulumi.get(self, \"availability_zone\")", "def test_get_zone_from_account(self):\n account = Account('test-account')\n zone = Zone('test.example.com')\n account.add_zone(zone)\n self.assertEqual(account.get_zone('test.example.com'), zone)", "def availability_zone(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"availability_zone\")", "def zone_id(self) -> Optional[str]:\n return pulumi.get(self, \"zone_id\")", "def snapshot(self):\n self._snapshot = self._monoprice.zone_status(self._zone_id)", "def test_show_config_node(self):\n config_node = self._create_config_node()\n config_node_uuid = config_node['config-node']['uuid']\n with self.override_role():\n self.config_client.show_config_node(config_node_uuid)", "def get_zone_connection(self):\n return self.m_connection.zones", "def show_instance(name, call=None):\n if call != \"action\":\n raise SaltCloudSystemExit(\n \"The show_instance action must be called with -a or --action.\"\n )\n\n nodes = list_nodes_full()\n __utils__[\"cloud.cache_node\"](nodes[name], _get_active_provider_name(), __opts__)\n return nodes[name]", "def get_single_zone(self, region):\n # TODO: Implement zones list\n # Hardcoded to us-west2\n z = {\n 
\"us-west2\": ['us-west2-a', 'us-west2-b', 'us-west2-c'],\n \"us-west1\": ['us-west1-a', 'us-west1-b', 'us-west1-c']\n }\n return z[region][:1]", "def get_zone_id(ctx, param, zone_name):\n del ctx #unused\n del param #unused\n cf = CloudFlare.CloudFlare()\n zones = cf.zones.get(params={'name': zone_name})\n if len(zones) != 1:\n raise click.ClickException('Invalid zone name: {}'.format(zone_name))\n return (zones[0]['id'], zones[0]['name'])", "def getNodeAddress(self, node):\n if self.role == Roles.ACTIVE or self.role == Roles.PASSIVE:\n return Backend().configuration.getAddress(node)\n else:\n if node == \"system-manager\":\n return self._systemManagerAddress.value\n elif node == self.node:\n return self.address\n else:\n return None", "def getZoneId(self):\n return self.zoneId", "def getNodeAddress(self, node):\n return self.backend.configuration.getAddress(node)", "def getZoneId(self):\n #self.zoneId\n if self.zoneId:\n return self.zoneId\n else:\n self.notify.warning(\"no zone id available\")", "def get_zone(self):\n to_return = None\n try:\n to_return = self.ns1.loadZone(self.module.params.get('zone'))\n except ResourceException as re:\n if re.response.code == 404:\n if (\n self.module.params.get('ignore_missing_zone')\n and self.module.params.get('state') == \"absent\"\n ):\n # zone not found but we are in the absent state\n # and the user doesn't care that the zone doesn't exist\n # nothing to do and no change\n self.module.exit_json(changed=False)\n else:\n # generic error or user cares about missing zone\n self.module.fail_json(\n msg=\"error code %s - %s \" % (re.response.code, re.message)\n )\n return to_return", "def detail_cluster(cluster_name, znode):\n\n _cluster_info = dict()\n _cluster_info.update(app.clusters[cluster_name].__dict__)\n _cluster_info.pop(\"auth_data\", None)\n _cluster_info[\"connection\"] = app.managers[cluster_name]._client.state\n resp = Response(json.dumps(_cluster_info),\n status=200,\n mimetype=\"application/json\")\n return resp", "def ValidateZone(args):\n args.zone = args.zone or properties.VALUES.compute.zone.Get(required=True)", "def get_instance(self, node_id: str) -> \"GCPNode\":\n return", "def zoneadm():\n\n ret = run_cmd('/usr/sbin/zoneadm list -pc')\n\n if isinstance(ret, basestring):\n return [ret]\n else:\n return ret", "def get_current_zone_id() -> int:\n return services.current_zone_id()", "def dump_zone(self, zone_id):\r\n return self.service.getZoneFileContents(id=zone_id)", "def get_node_ip(self):\n return ray.services.get_node_ip_address()", "def get_node_ip(\n self,\n name,\n ):\n pass", "def get_zone(self, zone_id, records=True):\r\n mask = None\r\n if records:\r\n mask = 'resourceRecords'\r\n return self.service.getObject(id=zone_id, mask=mask)", "def list_zones(self):\n\n return [zone[\"zone\"] for zone in list(self._zones.values())]", "def sec_zone(module):\n margs = module.params\n\n endpoint = 'blueprints/{}/security-zones'.format(margs['blueprint_id'])\n\n name = margs.get('name', None)\n uuid = margs.get('id', None)\n vni_id = margs.get('vni_id', None)\n vlan_id = margs.get('vlan_id', None)\n\n if vni_id:\n try:\n vni_id = int(vni_id)\n except ValueError:\n module.fail_json(msg=\"Invalid ID: must be an integer\")\n\n errors = validate_vni_id(vni_id)\n\n if errors:\n module.fail_json(msg=errors)\n\n if vlan_id:\n try:\n vlan_id = int(vlan_id)\n except ValueError:\n module.fail_json(msg=\"Invalid ID: must be an integer\")\n\n errors = validate_vlan_id(vlan_id)\n\n if errors:\n module.fail_json(msg=errors)\n\n sz_data = 
aos_get(margs['session'], endpoint)\n my_sz = {}\n\n if not uuid:\n\n for k, v in sz_data['items'].items():\n if v['label'] == name:\n my_sz = v\n else:\n\n for k, v in sz_data['items'].items():\n if v['id'] == uuid:\n my_sz = v\n\n if margs['state'] == 'absent':\n success, changed, results = sec_zone_absent(module, margs['session'],\n endpoint, my_sz)\n\n elif margs['state'] == 'present':\n success, changed, results = sec_zone_present(module, margs['session'],\n endpoint, my_sz, vni_id,\n vlan_id)\n\n if success:\n module.exit_json(changed=changed, name=results['label'],\n id=results['id'], value=results)\n else:\n module.fail_json(msg=results)", "def smart_zone(self):\n if self._vsanobj.id is None:\n raise VsanNotPresent(\"Vsan \" + str(self._vsanobj._id) + \" is not present on the switch.\")\n out = self.__show_zone_status()\n return out[get_key(zonekeys.SMART_ZONE, self._SW_VER)]", "def zones(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"zones\")", "def availability_zone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"availability_zone\")", "def availability_zone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"availability_zone\")", "def get_address(project, zone, instance):\n return gcloud(\n project,\n 'addresses',\n 'describe',\n '%s-ip' % instance,\n '--region=%s' % get_region(zone),\n '--format=value(address)',\n )", "def setNodeTimeZone(self,node,timezone):\n post_data = {'timezone': str(timezone)}\n data = self.connect('put',\"nodes/%s/time\" % (node), post_data)\n return data", "def get_zones(self, context):\n # handling zones method in RPC\n response = self.dns_manager.get_zones(context)\n return response", "def zones(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"zones\")", "def zones(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"zones\")", "def hosted_zone_by_id(client: boto3.client, zone_id: str) -> AWSResponse:\n try:\n return client.get_hosted_zone(Id=zone_id)\n except ClientError as e:\n logger.exception(e.response['Error']['Message'])\n return {'HostedZone': []}", "def zone_master(self) -> None:\n for zone in self.coordinator.data.zones:\n if zone.MasterMode and zone.SharedRoomID == self.zone.SharedRoomID:\n return zone.ZoneID", "def get_region_db_detail(self, context, id):\n zone_obj = self.dns_manager.get_region_db_detail(context, id)\n return zone_obj", "def instance_view(self) -> 'outputs.DedicatedHostInstanceViewResponse':\n return pulumi.get(self, \"instance_view\")", "def getTaskZoneId(self):\n return self.getZoneId()", "def get_zones(self, latitude, longitude):\n result = self.__request(\n \"GET\",\n \"https://api.voiapp.io/v1/zones?lat={}&lng={}\".format(latitude, longitude),\n )\n if result and \"zones\" in result:\n return result[\"zones\"]", "def show_node(self):\n if self.controller.node_id:\n self.print_object(\n 'node',\n ('uid', 'status', 'roles'),\n self.controller.get_node()\n )\n else:\n print(\"Please select node at first.\")", "def get_zones_output(project: Optional[pulumi.Input[Optional[str]]] = None,\n region: Optional[pulumi.Input[Optional[str]]] = None,\n status: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetZonesResult]:\n ...", "def test_openshift_node_with_cluster_access_view(self):\n with schema_context(self.schema_name):\n expected = (\n OCPCostSummaryByNodeP.objects.annotate(**{\"value\": F(\"node\")})\n .values(\"value\")\n .distinct()\n 
.filter(cluster_id__in=[\"OCP-on-AWS\"])\n .count()\n )\n self.assertTrue(expected)\n url = reverse(\"openshift-nodes\")\n response = self.client.get(url, **self.headers)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n json_result = response.json()\n self.assertIsNotNone(json_result.get(\"data\"))\n self.assertIsInstance(json_result.get(\"data\"), list)\n self.assertEqual(len(json_result.get(\"data\")), expected)", "def list_zone_names():\n get_name = lambda a: a.get_name()\n return map(get_name, list_zones())", "def get_zones(vsys=\"1\"):\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": (\n \"/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys{}']/\"\n \"zone\".format(vsys)\n ),\n }\n\n return __proxy__[\"panos.call\"](query)", "def default_zone(self):\n if self._vsanobj.id is None:\n raise VsanNotPresent(\"Vsan \" + str(self._vsanobj._id) + \" is not present on the switch.\")\n out = self.__show_zone_status()\n return out[get_key(zonekeys.DEFAULT_ZONE, self._SW_VER)]", "def time_zone(self) -> str:\n return pulumi.get(self, \"time_zone\")", "def availability_zone(self) -> Optional[pulumi.Input[str]]:\n warnings.warn(\"\"\"Field 'availability_zone' has been deprecated from version 1.187.0. Use 'zone_id' instead.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"availability_zone is deprecated: Field 'availability_zone' has been deprecated from version 1.187.0. Use 'zone_id' instead.\"\"\")\n\n return pulumi.get(self, \"availability_zone\")", "def availability_zone(self) -> Optional[pulumi.Input[str]]:\n warnings.warn(\"\"\"Field 'availability_zone' has been deprecated from version 1.187.0. Use 'zone_id' instead.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"availability_zone is deprecated: Field 'availability_zone' has been deprecated from version 1.187.0. Use 'zone_id' instead.\"\"\")\n\n return pulumi.get(self, \"availability_zone\")", "async def describe_role_zone_info_async(\n self,\n request: dds_20151201_models.DescribeRoleZoneInfoRequest,\n ) -> dds_20151201_models.DescribeRoleZoneInfoResponse:\n runtime = util_models.RuntimeOptions()\n return await self.describe_role_zone_info_with_options_async(request, runtime)", "def get_zone_db_details(self, context, id):\n zone_obj = self.dns_manager.get_zone_db_details(context, id)\n return zone_obj", "def zone_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"zone_id\")", "def zone_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"zone_id\")", "def show_cluster(name: str) -> Cluster:\n environment = EnvironmentProvider().environment\n return environment.clusters[name]", "def secondary_gce_zone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"secondary_gce_zone\")", "def list_zones(self, kwargs):\n verbose = kwargs.get(\"verbose\", False)\n\n if not verbose:\n attributes = [\"dc\", \"objectClass\"]\n else:\n attributes = ALL\n\n self.display(\n self.engine.query(\n self.engine.ZONES_FILTER(),\n attributes, base=','.join([\"CN=MicrosoftDNS,DC=DomainDNSZones\", self.engine.base_dn])\n ),\n verbose\n )", "def test_zone_repr(self):\n zone = Zone('example.com')\n self.assertEqual(f'{zone}', 'Zone<example.com>')" ]
[ "0.6436603", "0.61140853", "0.61140853", "0.6104373", "0.60225934", "0.5837868", "0.57854277", "0.5775233", "0.5758423", "0.575697", "0.5726742", "0.5717086", "0.5708644", "0.5697726", "0.56893647", "0.5682885", "0.5647697", "0.5601878", "0.55982333", "0.5585867", "0.55534434", "0.5550864", "0.5550864", "0.5538219", "0.5502469", "0.5502469", "0.5490892", "0.5476695", "0.54360735", "0.54360735", "0.54360735", "0.54360735", "0.54286563", "0.54142606", "0.54142606", "0.54142606", "0.54142606", "0.54142606", "0.53942", "0.53587186", "0.53558004", "0.53073406", "0.52948445", "0.526071", "0.5256926", "0.5252223", "0.52334285", "0.5231368", "0.52090317", "0.5198366", "0.51365614", "0.5135563", "0.51238966", "0.51202184", "0.5115897", "0.5114413", "0.5106094", "0.51046395", "0.5090261", "0.5078891", "0.50716424", "0.50647193", "0.5061596", "0.50399673", "0.5038904", "0.5037864", "0.5022581", "0.501279", "0.5012644", "0.50116557", "0.50112754", "0.50112754", "0.5009906", "0.4999363", "0.49782416", "0.49748358", "0.49748358", "0.49705502", "0.49652004", "0.49578074", "0.49555537", "0.4949503", "0.49487668", "0.49461904", "0.49454615", "0.4944212", "0.4943282", "0.49176267", "0.4906959", "0.4900278", "0.48998973", "0.48998973", "0.4895487", "0.48849136", "0.48742825", "0.48742825", "0.48735833", "0.48733598", "0.4870302", "0.48663232" ]
0.5199382
49
> For more information, see [View the zone of a node](~~123825~~). This operation is applicable only to replica set and sharded cluster instances, but not to standalone instances.
async def describe_role_zone_info_async( self, request: dds_20151201_models.DescribeRoleZoneInfoRequest, ) -> dds_20151201_models.DescribeRoleZoneInfoResponse: runtime = util_models.RuntimeOptions() return await self.describe_role_zone_info_with_options_async(request, runtime)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_zone():\n r = get('http://metadata.google.internal/'\n 'computeMetadata/v1/instance/zone',\n headers={'Metadata-Flavor': 'Google'})\n if r.status_code == 200:\n return sub(r'.+zones/(.+)', r'\\1', r.text)\n else:\n return ''", "def zone(self) -> str:\n return pulumi.get(self, \"zone\")", "def zone(self) -> str:\n return pulumi.get(self, \"zone\")", "def ex_view_zone(self, domain, name_server):\n params = {\"DOMAIN\": domain, \"NS\": name_server}\n action = \"/api_dns_viewzone.asp\"\n if self.reseller_id is not None:\n action = \"/api_dns_viewzone_reseller.asp\"\n response = self.connection.request(action, params=params)\n return response.object", "def zone(self):\n return self._zone", "def zone(self) -> str:\n return self._zone", "def availability_zone(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"availability_zone\")", "def node_show(self, node):\n if node.instance_uuid:\n n = self.ironic_client.node.get_by_instance_uuid(\n node.instance_uuid)\n else:\n n = self.ironic_client.node.get(node.uuid)\n return n", "def __zone_object_address(self):\n\t\treturn self.__c_version.object_address()", "def sc_dns_zone(self):\n return self._sc_dns_zone", "def get_zone(self, conn, host):\n fl = 'name=\"%s\"' % host\n request = conn.instances().aggregatedList(project=PROJECT, filter=fl)\n \twhile request is not None:\n \t\tresponse = request.execute()\n \t\tzones = response.get('items', {})\n \t\tfor zone in zones.values():\n \t\t\tfor inst in zone.get('instances', []):\n \t\t\t\tif inst['name'] == host:\n \t\t\t\t\treturn inst['zone'].split(\"/\")[-1]\n \t\trequest = conn.instances().aggregatedList_next(previous_request=request, previous_response=response)\n \traise Exception(\"Unable to determin the zone for instance %s\" % (host))", "def listInstancesRegionZone(region,zone):\n\tprint \"-\"*80\n\tprint \"# Region :\",region,\" Zone\", zone\t\n\tprint \"-\"*80\n\tinstances = getInstancesRegionZone(region,zone)\n\tif instances:\n\t\tfor instance in instances:\n\t\t\tprint \"[\",instance.ami_launch_index,\"]\",instance.ip_address,\" (\",instance.private_ip_address,\") \",instance.instance_type,\" key=\",instance.key_name", "def compute_zones(self):\n path = '/os-availability-zone/detail'\n res = self.compute.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('Get openstack availability zone: %s' % truncate(res))\n return res[0]['availabilityZoneInfo']", "def zone_name(self):\n return self._zone_name", "def show(self, req, id):\n zone_id = int(id)\n zone = api.zone_get(req.environ['nova.context'], zone_id)\n return dict(zone=_scrub_zone(zone))", "def access_zone(self):\n return self._access_zone", "def local_zone():\n return get_localzone()", "def get_zone(zone=\"\", vsys=\"1\"):\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": (\n \"/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys{}']/\"\n \"zone/entry[@name='{}']\".format(vsys, zone)\n ),\n }\n\n return __proxy__[\"panos.call\"](query)", "def availability_zone(self) -> str:\n return pulumi.get(self, \"availability_zone\")", "def get_zone(self):\n return self.project.get_flow().get_zone_of_object(self)", "def secondary_gce_zone(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"secondary_gce_zone\")", "def zone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"zone\")", "def zone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"zone\")", "def get_current_zone() -> Zone:\n return services.current_zone()", "def 
SecurityZone(self) -> _n_6_t_7:", "def SecurityZone(self) -> _n_6_t_7:", "def do_zone_event(client, args):\n args.type = 'zone'\n do_event_show(client, args)", "def Get(self, name, zone):\n project = properties.VALUES.core.project.Get(required=True)\n fully_qualified_node_name_ref = resources.REGISTRY.Parse(\n name,\n params={\n 'locationsId': zone,\n 'projectsId': project\n },\n collection='tpu.projects.locations.nodes',\n )\n request = self.messages.TpuProjectsLocationsNodesGetRequest(\n name=fully_qualified_node_name_ref.RelativeName())\n return self.client.projects_locations_nodes.Get(request)", "def zone_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"zone_id\")", "def zone_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"zone_id\")", "def zone_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"zone_id\")", "def zone_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"zone_id\")", "def secondary_gce_zone(self) -> str:\n return pulumi.get(self, \"secondary_gce_zone\")", "def zone_id(self) -> str:\n return pulumi.get(self, \"zone_id\")", "def zone_id(self) -> str:\n return pulumi.get(self, \"zone_id\")", "def zone_id(self) -> str:\n return pulumi.get(self, \"zone_id\")", "def zone_id(self) -> str:\n return pulumi.get(self, \"zone_id\")", "def zone_id(self) -> str:\n return pulumi.get(self, \"zone_id\")", "def availability_zone(self) -> pulumi.Output[str]:\n warnings.warn(\"\"\"Field 'availability_zone' has been deprecated from version 1.187.0. Use 'zone_id' instead.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"availability_zone is deprecated: Field 'availability_zone' has been deprecated from version 1.187.0. Use 'zone_id' instead.\"\"\")\n\n return pulumi.get(self, \"availability_zone\")", "def default_zone(self, region):\n if region == 'us-east-1':\n return region + 'b'\n else:\n return region + 'a'", "def get_region(zone):\n return '-'.join(zone.split('-')[:2])", "def get_zone(self, kwargs):\n dns_zone = kwargs[\"dns_zone\"]\n try:\n results = self.engine.query(\n self.engine.ZONE_FILTER(),\n base=','.join([f\"DC={dns_zone}\", \"CN=MicrosoftDNS,DC=DomainDNSZones\", self.engine.base_dn])\n )\n except LdapActiveDirectoryView.ActiveDirectoryLdapException as e:\n error(e)\n else:\n self.display(results)", "def availability_zone(self) -> Optional[str]:\n return pulumi.get(self, \"availability_zone\")", "def test_get_zone_from_account(self):\n account = Account('test-account')\n zone = Zone('test.example.com')\n account.add_zone(zone)\n self.assertEqual(account.get_zone('test.example.com'), zone)", "def availability_zone(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"availability_zone\")", "def zone_id(self) -> Optional[str]:\n return pulumi.get(self, \"zone_id\")", "def snapshot(self):\n self._snapshot = self._monoprice.zone_status(self._zone_id)", "def test_show_config_node(self):\n config_node = self._create_config_node()\n config_node_uuid = config_node['config-node']['uuid']\n with self.override_role():\n self.config_client.show_config_node(config_node_uuid)", "def get_zone_connection(self):\n return self.m_connection.zones", "def describe_role_zone_info(\n self,\n request: dds_20151201_models.DescribeRoleZoneInfoRequest,\n ) -> dds_20151201_models.DescribeRoleZoneInfoResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_role_zone_info_with_options(request, runtime)", "def show_instance(name, call=None):\n if call != \"action\":\n raise SaltCloudSystemExit(\n \"The show_instance action must be called with -a or 
--action.\"\n )\n\n nodes = list_nodes_full()\n __utils__[\"cloud.cache_node\"](nodes[name], _get_active_provider_name(), __opts__)\n return nodes[name]", "def get_single_zone(self, region):\n # TODO: Implement zones list\n # Hardcoded to us-west2\n z = {\n \"us-west2\": ['us-west2-a', 'us-west2-b', 'us-west2-c'],\n \"us-west1\": ['us-west1-a', 'us-west1-b', 'us-west1-c']\n }\n return z[region][:1]", "def get_zone_id(ctx, param, zone_name):\n del ctx #unused\n del param #unused\n cf = CloudFlare.CloudFlare()\n zones = cf.zones.get(params={'name': zone_name})\n if len(zones) != 1:\n raise click.ClickException('Invalid zone name: {}'.format(zone_name))\n return (zones[0]['id'], zones[0]['name'])", "def getNodeAddress(self, node):\n if self.role == Roles.ACTIVE or self.role == Roles.PASSIVE:\n return Backend().configuration.getAddress(node)\n else:\n if node == \"system-manager\":\n return self._systemManagerAddress.value\n elif node == self.node:\n return self.address\n else:\n return None", "def getZoneId(self):\n return self.zoneId", "def getNodeAddress(self, node):\n return self.backend.configuration.getAddress(node)", "def getZoneId(self):\n #self.zoneId\n if self.zoneId:\n return self.zoneId\n else:\n self.notify.warning(\"no zone id available\")", "def get_zone(self):\n to_return = None\n try:\n to_return = self.ns1.loadZone(self.module.params.get('zone'))\n except ResourceException as re:\n if re.response.code == 404:\n if (\n self.module.params.get('ignore_missing_zone')\n and self.module.params.get('state') == \"absent\"\n ):\n # zone not found but we are in the absent state\n # and the user doesn't care that the zone doesn't exist\n # nothing to do and no change\n self.module.exit_json(changed=False)\n else:\n # generic error or user cares about missing zone\n self.module.fail_json(\n msg=\"error code %s - %s \" % (re.response.code, re.message)\n )\n return to_return", "def detail_cluster(cluster_name, znode):\n\n _cluster_info = dict()\n _cluster_info.update(app.clusters[cluster_name].__dict__)\n _cluster_info.pop(\"auth_data\", None)\n _cluster_info[\"connection\"] = app.managers[cluster_name]._client.state\n resp = Response(json.dumps(_cluster_info),\n status=200,\n mimetype=\"application/json\")\n return resp", "def ValidateZone(args):\n args.zone = args.zone or properties.VALUES.compute.zone.Get(required=True)", "def get_instance(self, node_id: str) -> \"GCPNode\":\n return", "def zoneadm():\n\n ret = run_cmd('/usr/sbin/zoneadm list -pc')\n\n if isinstance(ret, basestring):\n return [ret]\n else:\n return ret", "def get_current_zone_id() -> int:\n return services.current_zone_id()", "def dump_zone(self, zone_id):\r\n return self.service.getZoneFileContents(id=zone_id)", "def get_node_ip(self):\n return ray.services.get_node_ip_address()", "def get_node_ip(\n self,\n name,\n ):\n pass", "def get_zone(self, zone_id, records=True):\r\n mask = None\r\n if records:\r\n mask = 'resourceRecords'\r\n return self.service.getObject(id=zone_id, mask=mask)", "def list_zones(self):\n\n return [zone[\"zone\"] for zone in list(self._zones.values())]", "def smart_zone(self):\n if self._vsanobj.id is None:\n raise VsanNotPresent(\"Vsan \" + str(self._vsanobj._id) + \" is not present on the switch.\")\n out = self.__show_zone_status()\n return out[get_key(zonekeys.SMART_ZONE, self._SW_VER)]", "def sec_zone(module):\n margs = module.params\n\n endpoint = 'blueprints/{}/security-zones'.format(margs['blueprint_id'])\n\n name = margs.get('name', None)\n uuid = margs.get('id', None)\n vni_id = 
margs.get('vni_id', None)\n vlan_id = margs.get('vlan_id', None)\n\n if vni_id:\n try:\n vni_id = int(vni_id)\n except ValueError:\n module.fail_json(msg=\"Invalid ID: must be an integer\")\n\n errors = validate_vni_id(vni_id)\n\n if errors:\n module.fail_json(msg=errors)\n\n if vlan_id:\n try:\n vlan_id = int(vlan_id)\n except ValueError:\n module.fail_json(msg=\"Invalid ID: must be an integer\")\n\n errors = validate_vlan_id(vlan_id)\n\n if errors:\n module.fail_json(msg=errors)\n\n sz_data = aos_get(margs['session'], endpoint)\n my_sz = {}\n\n if not uuid:\n\n for k, v in sz_data['items'].items():\n if v['label'] == name:\n my_sz = v\n else:\n\n for k, v in sz_data['items'].items():\n if v['id'] == uuid:\n my_sz = v\n\n if margs['state'] == 'absent':\n success, changed, results = sec_zone_absent(module, margs['session'],\n endpoint, my_sz)\n\n elif margs['state'] == 'present':\n success, changed, results = sec_zone_present(module, margs['session'],\n endpoint, my_sz, vni_id,\n vlan_id)\n\n if success:\n module.exit_json(changed=changed, name=results['label'],\n id=results['id'], value=results)\n else:\n module.fail_json(msg=results)", "def zones(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"zones\")", "def availability_zone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"availability_zone\")", "def availability_zone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"availability_zone\")", "def get_address(project, zone, instance):\n return gcloud(\n project,\n 'addresses',\n 'describe',\n '%s-ip' % instance,\n '--region=%s' % get_region(zone),\n '--format=value(address)',\n )", "def setNodeTimeZone(self,node,timezone):\n post_data = {'timezone': str(timezone)}\n data = self.connect('put',\"nodes/%s/time\" % (node), post_data)\n return data", "def get_zones(self, context):\n # handling zones method in RPC\n response = self.dns_manager.get_zones(context)\n return response", "def zones(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"zones\")", "def zones(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"zones\")", "def hosted_zone_by_id(client: boto3.client, zone_id: str) -> AWSResponse:\n try:\n return client.get_hosted_zone(Id=zone_id)\n except ClientError as e:\n logger.exception(e.response['Error']['Message'])\n return {'HostedZone': []}", "def zone_master(self) -> None:\n for zone in self.coordinator.data.zones:\n if zone.MasterMode and zone.SharedRoomID == self.zone.SharedRoomID:\n return zone.ZoneID", "def get_region_db_detail(self, context, id):\n zone_obj = self.dns_manager.get_region_db_detail(context, id)\n return zone_obj", "def instance_view(self) -> 'outputs.DedicatedHostInstanceViewResponse':\n return pulumi.get(self, \"instance_view\")", "def getTaskZoneId(self):\n return self.getZoneId()", "def get_zones(self, latitude, longitude):\n result = self.__request(\n \"GET\",\n \"https://api.voiapp.io/v1/zones?lat={}&lng={}\".format(latitude, longitude),\n )\n if result and \"zones\" in result:\n return result[\"zones\"]", "def show_node(self):\n if self.controller.node_id:\n self.print_object(\n 'node',\n ('uid', 'status', 'roles'),\n self.controller.get_node()\n )\n else:\n print(\"Please select node at first.\")", "def get_zones_output(project: Optional[pulumi.Input[Optional[str]]] = None,\n region: Optional[pulumi.Input[Optional[str]]] = None,\n status: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> 
pulumi.Output[GetZonesResult]:\n ...", "def test_openshift_node_with_cluster_access_view(self):\n with schema_context(self.schema_name):\n expected = (\n OCPCostSummaryByNodeP.objects.annotate(**{\"value\": F(\"node\")})\n .values(\"value\")\n .distinct()\n .filter(cluster_id__in=[\"OCP-on-AWS\"])\n .count()\n )\n self.assertTrue(expected)\n url = reverse(\"openshift-nodes\")\n response = self.client.get(url, **self.headers)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n json_result = response.json()\n self.assertIsNotNone(json_result.get(\"data\"))\n self.assertIsInstance(json_result.get(\"data\"), list)\n self.assertEqual(len(json_result.get(\"data\")), expected)", "def list_zone_names():\n get_name = lambda a: a.get_name()\n return map(get_name, list_zones())", "def get_zones(vsys=\"1\"):\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": (\n \"/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys{}']/\"\n \"zone\".format(vsys)\n ),\n }\n\n return __proxy__[\"panos.call\"](query)", "def default_zone(self):\n if self._vsanobj.id is None:\n raise VsanNotPresent(\"Vsan \" + str(self._vsanobj._id) + \" is not present on the switch.\")\n out = self.__show_zone_status()\n return out[get_key(zonekeys.DEFAULT_ZONE, self._SW_VER)]", "def availability_zone(self) -> Optional[pulumi.Input[str]]:\n warnings.warn(\"\"\"Field 'availability_zone' has been deprecated from version 1.187.0. Use 'zone_id' instead.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"availability_zone is deprecated: Field 'availability_zone' has been deprecated from version 1.187.0. Use 'zone_id' instead.\"\"\")\n\n return pulumi.get(self, \"availability_zone\")", "def availability_zone(self) -> Optional[pulumi.Input[str]]:\n warnings.warn(\"\"\"Field 'availability_zone' has been deprecated from version 1.187.0. Use 'zone_id' instead.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"availability_zone is deprecated: Field 'availability_zone' has been deprecated from version 1.187.0. Use 'zone_id' instead.\"\"\")\n\n return pulumi.get(self, \"availability_zone\")", "def time_zone(self) -> str:\n return pulumi.get(self, \"time_zone\")", "def get_zone_db_details(self, context, id):\n zone_obj = self.dns_manager.get_zone_db_details(context, id)\n return zone_obj", "def zone_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"zone_id\")", "def zone_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"zone_id\")", "def show_cluster(name: str) -> Cluster:\n environment = EnvironmentProvider().environment\n return environment.clusters[name]", "def secondary_gce_zone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"secondary_gce_zone\")", "def list_zones(self, kwargs):\n verbose = kwargs.get(\"verbose\", False)\n\n if not verbose:\n attributes = [\"dc\", \"objectClass\"]\n else:\n attributes = ALL\n\n self.display(\n self.engine.query(\n self.engine.ZONES_FILTER(),\n attributes, base=','.join([\"CN=MicrosoftDNS,DC=DomainDNSZones\", self.engine.base_dn])\n ),\n verbose\n )", "def test_zone_repr(self):\n zone = Zone('example.com')\n self.assertEqual(f'{zone}', 'Zone<example.com>')" ]
[ "0.6435644", "0.6112369", "0.6112369", "0.61034834", "0.60211384", "0.58364195", "0.5784078", "0.57759815", "0.57568705", "0.5756319", "0.5725448", "0.5716081", "0.5707598", "0.5696081", "0.5689158", "0.5681999", "0.56461537", "0.56018806", "0.5596786", "0.5584914", "0.5552933", "0.554943", "0.554943", "0.55368644", "0.55004627", "0.55004627", "0.5489855", "0.54763794", "0.54349834", "0.54349834", "0.54349834", "0.54349834", "0.54280436", "0.541296", "0.541296", "0.541296", "0.541296", "0.541296", "0.53935075", "0.535713", "0.5355222", "0.53071094", "0.5293484", "0.5259847", "0.52558213", "0.5251002", "0.52319807", "0.52317977", "0.5208251", "0.5199076", "0.5197862", "0.51352066", "0.51350254", "0.5124179", "0.511928", "0.51166046", "0.51134616", "0.51053834", "0.5103786", "0.5089624", "0.5079147", "0.5072194", "0.50632054", "0.5061402", "0.50404125", "0.5038338", "0.50377446", "0.50218004", "0.5012364", "0.5012059", "0.50101775", "0.50100994", "0.50100994", "0.5009554", "0.49993473", "0.49772304", "0.4973348", "0.4973348", "0.49708396", "0.49648046", "0.49566573", "0.4955278", "0.4948763", "0.49481323", "0.49462825", "0.4944216", "0.49441195", "0.49420053", "0.49169606", "0.49058157", "0.48992023", "0.48992023", "0.4898859", "0.48838836", "0.48733884", "0.48733884", "0.4873046", "0.48730037", "0.48691684", "0.48649764" ]
0.48952064
93
This operation is applicable only to general-purpose local-disk and dedicated local-disk instances. You can call this operation up to 30 times per minute. To call this operation at a higher frequency, use a Logstore. For more information, see [Manage a Logstore](~~48990~~).
def describe_running_log_records_with_options( self, request: dds_20151201_models.DescribeRunningLogRecordsRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.DescribeRunningLogRecordsResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.dbname): query['DBName'] = request.dbname if not UtilClient.is_unset(request.end_time): query['EndTime'] = request.end_time if not UtilClient.is_unset(request.node_id): query['NodeId'] = request.node_id if not UtilClient.is_unset(request.order_type): query['OrderType'] = request.order_type if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.page_number): query['PageNumber'] = request.page_number if not UtilClient.is_unset(request.page_size): query['PageSize'] = request.page_size if not UtilClient.is_unset(request.resource_group_id): query['ResourceGroupId'] = request.resource_group_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.role_id): query['RoleId'] = request.role_id if not UtilClient.is_unset(request.role_type): query['RoleType'] = request.role_type if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token if not UtilClient.is_unset(request.start_time): query['StartTime'] = request.start_time req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='DescribeRunningLogRecords', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.DescribeRunningLogRecordsResponse(), self.call_api(params, req, runtime) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def monitorStore():\n # commented to use psutil system info system_info = systeminfo.get_all_info()\n\n system_info = nodeinfo.node_all()\n system_info ['monitored_timestamp'] = config.get_current_system_timestamp()\n\n # Attach sliver info to system info\n system_info.update(sliverinfo.collectAllDataAPI())\n\n s = shelve.open('log_shelf.db', writeback = True)\n\n while(1):\n try:\n try:\n if s.has_key('current_seq_number'):\n #Update current sequence number\n s['current_seq_number']+= 1\n current_seq = s['current_seq_number']\n else:\n current_seq = 1\n s['current_seq_number']= current_seq\n\n print(\"writing to file: \" + str(current_seq))\n\n # print(\"writing to file\" + str(system_info))\n s[str(current_seq)]= system_info\n\n\n finally:\n s.close()\n break\n\n except OSError:\n # In some research devices, the underlying dbm has a bug which needs to be handled explicitly\n print(\"Exception caught while handling shelve file!! OS Error: file not found. Trying again in 1 second\")\n time.sleep(1)\n continue\n\n delete_seen_entries()", "def log(msg):\n\n print('datastore: %s' % msg)", "def disable_disk_logging():\r\n app.set_option(_DISK_LOG_LEVEL_OPTION, LogOptions._LOG_LEVEL_NONE_KEY, force=True)", "def store_stream(data: DataStream):\n if data:\n try:\n c1 = datetime.now()\n CC.save_datastream(data,\"datastream\")\n e1 = datetime.now()\n CC.save_datastream_to_influxdb(data)\n i1 = datetime.now()\n print(\"Cassandra Time: \", e1-c1, \" Influx Time: \",i1-e1, \" Batch size: \",len(data.data))\n except:\n cc_log()", "def test_Drive_log_count(opencl_env: cldrive_env.OpenCLEnvironment):\n logs = opencl_kernel_driver.Drive(\n \"\"\"\nkernel void A(global int* a, global int* b) {\n a[get_global_id(0)] += b[get_global_id(0)];\n}\n\"\"\",\n 16,\n 16,\n opencl_env,\n 5,\n )\n assert len(logs) == 5", "def disk():\n run(env.disk_usage_command % env)", "def record_used(kind, hash):\n if os.path.exists(LOG_FILEPATH):\n log = open(os.path.join(ROOT, 'used'), 'a')\n else:\n log = open(os.path.join(ROOT, 'used'), 'w+')\n\n log.writelines([\"%s...%s\\n\" % (kind, hash)])", "def syslog_local_file(handle, admin_state, name, severity=\"emergencies\",\n size=\"40000\"):\n\n from ucsmsdk.mometa.comm.CommSyslogFile import CommSyslogFile\n\n mo = CommSyslogFile(parent_mo_or_dn=\"sys/svc-ext/syslog\", size=size,\n admin_state=admin_state,\n name=name,\n severity=severity)\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n return mo", "def check_oplog(con, warning, critical, perf_data):\n warning = warning or 24 \n critical = critical or 4\n try:\n db = con.local\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) \n if (db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) != None) :\n oplog = \"oplog.rs\";\n else :\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"})\n if (db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"}) != None) :\n oplog = \"oplog.$main\";\n else :\n message = \"neither master/slave nor replica set replication detected\";\n return check_levels(None,warning,critical,message)\n\n try:\n set_read_preference(con.admin)\n data=con.local.command(pymongo.son_manipulator.SON([('collstats',oplog)]))\n except:\n data = con.admin.command(son.SON([('collstats',oplog)]))\n\n ol_size=data['size']\n ol_storage_size=data['storageSize']\n ol_used_storage=int(float(ol_size)/ol_storage_size*100+1)\n ol=con.local[oplog]\n firstc = ol.find().sort(\"$natural\",pymongo.ASCENDING).limit(1)[0]['ts']\n lastc = 
ol.find().sort(\"$natural\",pymongo.DESCENDING).limit(1)[0]['ts']\n time_in_oplog= (lastc.as_datetime()-firstc.as_datetime())\n message=\"Oplog saves \"+ str(time_in_oplog) + \" %d%% used\" %ol_used_storage \n try: #work starting from python2.7\n hours_in_oplog= time_in_oplog.total_seconds()/60/60\n except:\n hours_in_oplog= float(time_in_oplog.seconds + time_in_oplog.days * 24 * 3600)/60/60\n approx_level=hours_in_oplog*100/ol_used_storage\n message+=performance_data(perf_data,[(\"%.2f\" % hours_in_oplog,'oplog_time',warning,critical),(\"%.2f \" % approx_level, 'oplog_time_100_percent_used')])\n return check_levels(-approx_level,-warning,-critical,message)\n\n except Exception, e:\n return exit_with_general_critical(e)", "def disk_log_level():\r\n if LogOptions._DISK_LOG_LEVEL is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_LEVEL", "def get_full_log(self):\n return self._get_log('full')", "def _record_storage_event(metric, value=0):\n command_name = properties.VALUES.metrics.command_name.Get()\n metrics.CustomKeyValue(command_name, 'Storage-' + metric, value)", "def get_logdb_quota():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<show><system><logdb-quota></logdb-quota></system></show>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def test_memory_pressure(self):\n self.execute_query(self.query)\n # This triggers a full GC as of openjdk 1.8.\n call([\"jmap\", \"-histo:live\", str(self.cluster.catalogd.get_pid())])\n # Sleep for logbufsecs=5 seconds to wait for the log to be flushed. Wait 5 more\n # seconds to reduce flakiness.\n time.sleep(10)\n assert self.metadata_cache_string not in self._get_catalog_object()", "def getLogs():", "def getLogs():", "def InsertLog():", "def GetLogs(self):\n raise NotImplementedError()", "def logmem(cls, tag):\n\n cls.logger.info(\"----%s----\", str(tag))\n mem = psutil.virtual_memory()\n cls.logger.info(\"total:%s M\", mem.total / 1024 / 1024)\n cls.logger.info(\"available:%s M\", mem.available / 1024 / 1024)\n cls.logger.info(\"used:%s M\", mem.used / 1024 / 1024)\n cls.logger.info(\"free:%s M\", mem.free / 1024 / 1024)\n cls.logger.info(\"active:%s M\", mem.active / 1024 / 1024)\n cls.logger.info(\"inactive:%s M\", mem.inactive / 1024 / 1024)\n cls.logger.info(\"buffers:%s M\", mem.buffers / 1024 / 1024)\n cls.logger.info(\"cached:%s M\", mem.cached / 1024 / 1024)\n cls.logger.info(\"shared:%s M\", mem.shared / 1024 / 1024)", "def upload_crashes(self, name, directory):\n logging.info('Not uploading crashes because no Filestore.')", "def logs(lines=50):\n run(f'journalctl --lines {lines} --unit addok --follow')", "def log(self, message):\n try:\n stat = os.stat(self.logpath)\n if stat.st_size >= 1000000:\n os.rename(self.logpath, self.logpath + '.1')\n except:\n pass\n logfile = open(self.logpath, 'a+')\n logfile.write(message + \"\\n\")\n logfile.close()", "def logs(name, namespace, region, start_time, end_time, offset, failed, tail):\n\n start, end = _align_time(start_time, end_time, offset, tail)\n client = ScfLogClient(name, namespace, region, failed)\n client.fetch_log(start, end, tail)", "def objectLogger(params):\n mode = params['mode']\n maxlen = params['maxlen']\n file_format = params['format']\n home_dir = params['storage_dir']\n server = Listener((params['ip'], int(params['port'])))\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n while True:\n conn = server.accept()\n while True:\n try:\n data 
= conn.recv()\n except EOFError:\n break\n if data:\n storage.append(data)\n else:\n storage.sync(id_generator())\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n conn.close()", "def _load_disk(self):", "def _load_disk(self):", "def redire_spark_logs(bigdl_type=\"float\", log_path=os.getcwd() + \"/bigdl.log\"):\n callBigDlFunc(bigdl_type, \"redirectSparkLogs\", log_path)", "def get_device_log(self, client):\r\n file_path = client.getDeviceLog()\r\n logging.info(str(time.asctime(time.localtime())) + \" Device Logs collected !! \" + file_path)\r\n #self.logger.info(\"Device logs collected at %s\" % file_path)\r\n return file_path", "def logs(self, container: Container) -> str:", "def log_store(self) -> str:\n return pulumi.get(self, \"log_store\")", "def on_sync(self):\r\n self.log()", "def collect_storage_metrics(sys):\n try:\n session = get_session()\n client = InfluxDBClient(host=INFLUXDB_HOSTNAME, port=INFLUXDB_PORT, database=INFLUXDB_DATABASE)\n\n sys_id = sys[\"id\"]\n sys_name = get_system_name(sys)\n\n json_body = list()\n\n # Get Drive statistics\n drive_stats_list = session.get((\"{}/{}/analysed-drive-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n drive_locations = get_drive_location(sys_id, session)\n if CMD.showDriveNames:\n for stats in drive_stats_list:\n location_send = drive_locations.get(stats[\"diskId\"])\n LOG.info((\"Tray{:02.0f}, Slot{:03.0f}\").format(location_send[0], location_send[1]))\n # Add Drive statistics to json body\n for stats in drive_stats_list:\n disk_location_info = drive_locations.get(stats[\"diskId\"])\n disk_item = dict(\n measurement = \"disks\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n sys_tray = (\"{:02.0f}\").format(disk_location_info[0]),\n sys_tray_slot = (\"{:03.0f}\").format(disk_location_info[1])\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in DRIVE_PARAMS\n )\n )\n if CMD.showDriveMetrics:\n LOG.info(\"Drive payload: %s\", disk_item)\n json_body.append(disk_item)\n\n # Get interface statistics\n interface_stats_list = session.get((\"{}/{}/analysed-interface-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showInterfaceNames:\n for stats in interface_stats_list:\n LOG.info(stats[\"interfaceId\"])\n # Add interface statistics to json body\n for stats in interface_stats_list:\n if_item = dict(\n measurement = \"interface\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n interface_id = stats[\"interfaceId\"],\n channel_type = stats[\"channelType\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in INTERFACE_PARAMS\n )\n )\n if CMD.showInterfaceMetrics:\n LOG.info(\"Interface payload: %s\", if_item)\n json_body.append(if_item)\n\n # Get System statistics\n system_stats_list = session.get((\"{}/{}/analysed-system-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n # Add System statistics to json body\n sys_item = dict(\n measurement = \"systems\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name\n ),\n fields = dict(\n (metric, system_stats_list.get(metric)) for metric in SYSTEM_PARAMS\n )\n )\n if CMD.showSystemMetrics:\n LOG.info(\"System payload: %s\", sys_item)\n json_body.append(sys_item)\n \n # Get Volume statistics\n volume_stats_list = session.get((\"{}/{}/analysed-volume-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showVolumeNames:\n for stats in volume_stats_list:\n LOG.info(stats[\"volumeName\"])\n # Add Volume statistics to json body\n for stats in 
volume_stats_list:\n vol_item = dict(\n measurement = \"volumes\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n vol_name = stats[\"volumeName\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in VOLUME_PARAMS\n )\n )\n if CMD.showVolumeMetrics:\n LOG.info(\"Volume payload: %s\", vol_item)\n json_body.append(vol_item)\n\n if not CMD.doNotPost:\n client.write_points(json_body, database=INFLUXDB_DATABASE, time_precision=\"s\")\n\n except RuntimeError:\n LOG.error((\"Error when attempting to post statistics for {}/{}\").format(sys[\"name\"], sys[\"id\"]))", "def reset_used():\n with open(LOG_FILEPATH, 'w+') as logfile:\n pass", "def docker_logs():\n local('docker logs {} -f'.format(project_name))", "def test_slf_notastring():\n oldlogfile = get_logfile()\n start_logfile(1.0)\n start_logfile(True)\n set_logfile(oldlogfile)", "def set_disk_log_level(log_level):\r\n LogOptions._DISK_LOG_SCHEME, LogOptions._DISK_LOG_LEVEL = (\r\n LogOptions._parse_loglevel(log_level, scheme='google'))", "def test_slf_basic():\n oldlogfile = get_logfile()\n with tempfile.TemporaryDirectory() as tmp:\n logfile = os.path.join(tmp, 'log.txt')\n assert ~os.path.exists(logfile)\n start_logfile(logfile)\n assert os.path.exists(logfile)\n set_logfile(oldlogfile)", "def _performance_log(func):\n\n def wrapper(*arg):\n \"\"\" wrapper \"\"\"\n\n start = datetime.datetime.now()\n\n # Code execution\n res = func(*arg)\n\n if _log_performance:\n usage = resource.getrusage(resource.RUSAGE_SELF)\n memory_process = (usage[2])/1000\n\n delta = datetime.datetime.now() - start\n delta_milliseconds = int(delta.total_seconds() * 1000)\n\n _logger.info(\"PERFORMANCE - {0} - milliseconds |{1:>8,}| - memory MB |{2:>8,}|\"\n .format(func.__name__,\n delta_milliseconds,\n memory_process))\n\n return res\n\n return wrapper", "def log(length, file):\n\n if user_init.check_pre_init() and user_utility.check_drive_init() == 'True':\n \n data = user_utility.read_log(length, file)\n\n for log in data:\n print(log)\n\n\n else:\n user_utility.print_error(\"Sink folder not Found! 
Initialise folder first or reset your configuration!\")", "def logprllwrite(self):\n sql = '''select to_char(time_waited, 'FM99999999999999990') retvalue \n from v$system_event se, v$event_name en where se.event(+) \n = en.name and en.name = 'log files parallel write' '''\n self.cur.execute(sql)\n res = self.cur.fetchall()\n for i in res:\n print(i[0])", "def init_log():\n log_op = LogOp()\n log_op.config_log(logging.DEBUG)", "def logManager(self):\n time.sleep(0.1)\n while True:\n try:\n time.sleep(0.2)\n data = self.logQ.get(block=False)\n except Queue.Empty:\n pass\n else:\n try:\n self.log_lock.acquire() \n self.log_file.logEntry(data)\n time.sleep(0.1)\n self.log_lock.release()\n except:\n print '*Unable to write to log file*'", "def startLogFiles():\n #global techLog\n global userl\n _generic_startLogFiles(True)", "def _load_disk(self):\r\n pass", "def log_free_disk_space():\n cmd = 'df -h'\n p = Popen(cmd, shell=True, stdout=PIPE)\n res = p.communicate()\n if res[0]:\n res = res[0]\n else:\n res = res[1]\n logger.warning('Disk usage statisticks:')\n logger.warning(res)", "def test02StoreRefresh(self):\n s = utils.FastStore(max_size=5)\n keys = []\n for i in range(0, 5):\n keys.append(s.Put(i, i))\n\n # This should not raise because keys[0] should be refreshed each time its\n # gotten\n for i in range(0, 1000):\n s.Get(keys[0])\n s.Put(i, i)", "def log_time(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n start = time.time()\n result = func(*args, **kwargs)\n end = time.time()\n f = open(\"log_dev.txt\",'a',encoding=\"utf8\")\n time_res = end - start\n f.write(\"\\n\"+func.__name__+ \" time = \" + str(time_res))\n return result\n\n return wrapper", "def checkLogs():\n run(\"cat /etc/chariot/logs\")#not sure if this is cat-able", "def store(**kwargs):\r\n stored = AppLog(**kwargs)\r\n DBSession.add(stored)", "def test_compute_persistence(perfectModelEnsemble_initialized_control):\n perfectModelEnsemble_initialized_control._compute_persistence(metric=\"acc\")", "def collect_logs(self, log_dir, label=None, min_t=100, max_t=200):\n pass", "def test_backup_with_update_on_disk_of_snapshot_markers(self):\n version = RestConnection(self.backupset.backup_host).get_nodes_version()\n if version[:5] == \"6.5.0\":\n self.log.info(\"\\n\\n******* Due to issue in MB-36904, \\\n \\nthis test will be skipped in 6.5.0 ********\\n\")\n return\n gen1 = BlobGenerator(\"ent-backup1\", \"ent-backup-\", self.value_size, end=100000)\n gen2 = BlobGenerator(\"ent-backup2\", \"ent-backup-\", self.value_size, end=100000)\n gen3 = BlobGenerator(\"ent-backup3\", \"ent-backup-\", self.value_size, end=100000)\n rest_conn = RestConnection(self.backupset.cluster_host)\n rest_conn.create_bucket(bucket=\"bucket0\", ramQuotaMB=1024)\n self.buckets = rest_conn.get_buckets()\n authentication = \"-u Administrator -p password\"\n\n self._load_all_buckets(self.master, gen1, \"create\", 0)\n self.log.info(\"Stop persistent\")\n cluster_nodes = rest_conn.get_nodes()\n clusters = copy.deepcopy(cluster_nodes)\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n for node in clusters:\n shell.execute_command(\"%scbepctl%s %s:11210 -b %s stop %s\" % \\\n (self.cli_command_location,\n self.cmd_ext,\n node.ip,\n \"bucket0\",\n authentication))\n shell.disconnect()\n self.log.info(\"Load 2nd batch docs\")\n self._load_all_buckets(self.master, gen2, \"create\", 0)\n self.log.info(\"Run full backup with cbbackupwrapper\")\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n backup_dir = 
self.tmp_path + \"backup\" + self.master.ip\n shell.execute_command(\"rm -rf %s\" % backup_dir)\n shell.execute_command(\"mkdir %s\" % backup_dir)\n shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m full %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n self.log.info(\"Load 3rd batch docs\")\n self._load_all_buckets(self.master, gen3, \"create\", 0)\n self.log.info(\"Run diff backup with cbbackupwrapper\")\n output, _ = shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m diff %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n\n if output and \"SUCCESSFULLY COMPLETED\" not in output[1]:\n self.fail(\"Failed to backup as the fix in MB-25727\")\n shell.disconnect()", "def test_errors_on_log(self):\n mb = self.maria_backup\n self.assertFalse(mb.errors_on_log()) # at the moment, xtrabackup execution does not generate disk logs", "def add_log(self, log):\n log = str(datetime.datetime.now()) + \": \"+log+\"\\n\"\n print(log)\n self.logs.append(log)\n if len(self.logs) > 10:\n self.append_to_logfile()", "def log_scalar(name, value, step, autolog):\n if not autolog:\n mlflow.log_metric(name, value)", "def recordLogsToList(log):\n print log\n# global LOGLIST\n LOGLIST.append(log)", "def on_L1(self):\r\n self.log()", "def on_L2(self):\r\n self.log()", "def log_all(self):\n self.save_raw()\n self.log()", "def logs_directory(self):", "def test_log_links(self):\n self.host = synthetic_host(\"myserver-with-nids\", [Nid.Nid(\"192.168.0.1\", \"tcp\", 0)])\n self.create_simple_filesystem(self.host)\n fake_log_message(\"192.168.0.1@tcp testfs-MDT0000\")\n response = self.api_client.get(\"/api/log/\")\n (event,) = self.deserialize(response)[\"objects\"]\n self.assertEqual(len(event[\"substitutions\"]), 2)\n self.host.state = \"removed\"\n self.host.save()\n self.mdt.not_deleted = False\n self.mdt.save()\n response = self.api_client.get(\"/api/log/\")\n (event,) = self.deserialize(response)[\"objects\"]\n self.assertEqual(len(event[\"substitutions\"]), 0)", "def log(string):\n\n print string\n\n# data and time\n dt = datetime.now().strftime(\"%b %d %H:%M:%S\")\n\n# check if log file exist / if not create it\n check_logf = os.path.isfile(logfile)\n if check_logf == False:\n os.system(\"touch %s\" % (logfile))\n firstlog = \"%s %s jadm: jadm log file was created!\" % (dt, os.uname()[1])\n os.system(\"echo '%s' > %s\" % (firstlog, logfile))\n\n# applay string to log file\n string = \"%s %s jadm: %s\" % (dt, os.uname()[1], string)\n os.system(\"echo '%s' >> %s\" % (string, logfile))", "def test_data_store_local_index(self, tcex: TcEx):\n tcex.api.tc.v2.datastore('local', self.data_type)", "def createLogicalVolume(self, vg, filesystem, name, mountpoint, size):\n lv = {}\n lv['command'] = 'create:logvol'\n lv['vg'] = vg\n lv['fs'] = filesystem\n lv['size'] = size - EXTENT_SIZE + 1\n lv['name'] = name\n lv['mountPoint'] = mountpoint\n lv['format'] = 'yes'\n\n return lv", "def disk_log_scheme():\r\n if LogOptions._DISK_LOG_SCHEME is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_SCHEME", "def test_cbbackupmgr_collect_logs(self):\n if \"5.5\" > self.cb_version[:3]:\n self.fail(\"This test is only for cb version 5.5 and later. 
\")\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster()\n self._collect_logs()", "def log_runtime(label, mean_time, std, instances):\n pass", "def do_disk_event(client, args):\n args.type = 'disk'\n do_event_show(client, args)", "def show_bigdl_info_logs(bigdl_type=\"float\"):\n callBigDlFunc(bigdl_type, \"showBigDlInfoLogs\")", "def logs(self, **options):\n cmd = self.get_scalingo_prefix_cmd()\n cmd += \" logs -f\"\n for name, val in options.items():\n if len(name) == 1:\n cmd += f\" -{name}\"\n else:\n cmd += f\" --{name}\"\n if val:\n cmd += f\" {val}\"\n if self.env_name == \"local\":\n raise ValueError(\"No logs on local environment\")\n else:\n asyncio.run(self.async_run(cmd))", "def store_data(batch_df: DataFrame, output_delta_lake_path):\n batch_df.select(col(\"MeterId\"),\n col(\"SupplierId\"),\n col(\"Measurement\"),\n col(\"ObservationTime\")) \\\n .repartition(\"SupplierId\") \\\n .write \\\n .partitionBy(\"SupplierId\") \\\n .format(\"delta\") \\\n .mode(\"append\") \\\n .save(output_delta_lake_path)", "def logentry(jobid, label, typ, content=None, path=None):\n ud = str(uuid.uuid4())\n db.logs.save({\"uuid\":ud, \"jobid\":jobid, \"label\":label, \"type\":typ, \"content\":content, \"date\":tstamp(), \"containerurl\":path})", "def dump_to_log(self):\n # self._send_request(\"/dumpToLog\")\n pass", "def load(lb_id, backend='memory'):\n return b_api.fetch(backend).logbook_get(lb_id)", "def store(status, message, **kwargs):\r\n LogRecord(status, message, **kwargs)", "def log_mounts_info(mounts):\n if isinstance(mounts, str):\n mounts = [mounts]\n\n g.log.info(\"Start logging mounts information:\")\n for mount_obj in mounts:\n g.log.info(\"Information of mount %s:%s\", mount_obj.client_system,\n mount_obj.mountpoint)\n # Mount Info\n g.log.info(\"Look For Mountpoint:\\n\")\n cmd = \"mount | grep %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Disk Space Usage\n g.log.info(\"Disk Space Usage Of Mountpoint:\\n\")\n cmd = \"df -h %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Long list the mountpoint\n g.log.info(\"List Mountpoint Entries:\\n\")\n cmd = \"ls -ld %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Stat mountpoint\n g.log.info(\"Mountpoint Status:\\n\")\n cmd = \"stat %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)", "def putlog(self,s):\n if not self.logqueue == None:\n# print s\n self.logqueue.put(\"Spectrum: (\"+time.ctime()+\"):\\n\"+s)", "def set_size_based_rotating_log(log_path=LOGGER_PATH, log_name=LOGGER_FILENAME):\n\n # Checks if the path exists otherwise tries to create it\n if not os.path.exists(log_path):\n try:\n os.makedirs(log_path)\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n print(exc_type, exc_value, exc_traceback)\n\n # Sets the format and level of logging records\n format = '%(asctime)s - %(levelname)s - [%(pathname)s:%(lineno)s - %(funcName)10s() ] - %(message)s'\n formatter = Formatter(format)\n level=logging.DEBUG\n\n # Does basic configuration for the logging system\n logging.basicConfig(format=format, level=level)\n\n # Instantiates a logger object\n logger = logging.getLogger(\"\")\n\n handler = logging.handlers.RotatingFileHandler(filename=log_path+'/'+log_name,\n maxBytes=LOGGER_MAX_SIZE,\n backupCount=LOGGER_MAX_FILES)\n 
handler.setFormatter(formatter)\n handler.rotator = rotator\n handler.namer = namer\n logger.addHandler(handler)\n return logger", "def test_logging_log_rotate_for_mysql(\n self,\n admin_node,\n k8s_actions,\n show_step,\n os_deployed):\n logger.info('Log rotate for mysql')\n log_path = '/var/log/ccp/mysql/'\n log_file = 'mysql.log'\n\n show_step(1)\n # get cron pod\n cron_pod = [pod for pod in k8s_actions.api.pods.list(\n namespace=ext.Namespace.BASE_NAMESPACE)\n if 'cron-' in pod.name][0]\n # clean files\n utils.rm_files(admin_node, cron_pod, log_path + log_file + '*')\n\n show_step(2)\n for day in range(0, 8):\n utils.create_file(admin_node, cron_pod, log_path + log_file, 110)\n utils.run_daily_cron(admin_node, cron_pod, 'logrotate')\n sleep(5)\n\n show_step(3)\n log_files = utils.list_files(\n admin_node, cron_pod, log_path, log_file + '*')\n assert len(log_files) == 7,\\\n \"Count of log files after rotation is wrong. \" \\\n \"Expected {} Actual {}\".format(log_files, 7)", "def log(cmdDict, kind, error=None):\n\n cmdDict['timestamp'] = str(int(time.time() * 1000))\n cmdDict['logType'] = kind\n cmdDict['_id'] = cmdDict['transactionNumber'] + cmdDict['user'] + cmdDict['command'] + cmdDict['timestamp'] + cmdDict['logType']\n\n if (error != None):\n cmdDict['errorMessage'] = error\n\n try:\n Database.insert(TRANSACT_COLLECT, cmdDict)\n\n except pymongo.errors.PyMongoError as err:\n print(f\"ERROR! Could not log command into database. Failed with: {err}\")", "def upload_logs():\n return {\n 'page': 'upload_logs',\n 'raw_logs': '',\n }", "def logMemoryStats():\n class MemoryStatusEx(ctypes.Structure):\n \"\"\" MEMORYSTATUSEX \"\"\"\n kaFields = [\n ( 'dwLength', ctypes.c_ulong ),\n ( 'dwMemoryLoad', ctypes.c_ulong ),\n ( 'ullTotalPhys', ctypes.c_ulonglong ),\n ( 'ullAvailPhys', ctypes.c_ulonglong ),\n ( 'ullTotalPageFile', ctypes.c_ulonglong ),\n ( 'ullAvailPageFile', ctypes.c_ulonglong ),\n ( 'ullTotalVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailExtendedVirtual', ctypes.c_ulonglong ),\n ];\n _fields_ = kaFields; # pylint: disable=invalid-name\n\n def __init__(self):\n super(MemoryStatusEx, self).__init__();\n self.dwLength = ctypes.sizeof(self);\n\n try:\n oStats = MemoryStatusEx();\n ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(oStats));\n except:\n reporter.logXcpt();\n return False;\n\n reporter.log('Memory statistics:');\n for sField, _ in MemoryStatusEx.kaFields:\n reporter.log(' %32s: %s' % (sField, getattr(oStats, sField)));\n return True;", "def get_dismount_mount_record(r, start_time):\n retv = {'state': 'M',\n 'node': r['node'],\n 'volume': r['volume'],\n 'type': r['type'],\n 'logname': r['logname'],\n 'start': start_time,\n 'finish': timeutil.wind_time(start_time, seconds=20, backward=False),\n 'storage_group': 'PROBE_UTILITY',\n 'reads': 0,\n 'writes':0\n }\n return retv", "def azure_fetch_and_push_to_influx(token, azure_data_store, file_path, tag_map, influx_settings):\n df = read_df_from_azure(azure_data_store, file_path, token)\n (host, port, user, password, db_name, batch_size) = influx_settings\n #df=df.resample('1Min').mean()\n write_to_influx(df, tag_map, host, port, user, password, db_name, batch_size)", "def test_data_store_local_new_index(tcex: TcEx):\n data = {'one': 1}\n key = str(uuid.uuid4())\n rid = 'one'\n\n ds = tcex.api.tc.v2.datastore('local', key)\n\n results = ds.add(rid=rid, data=data)\n if results is None:\n assert False, 'datastore add returned None'\n assert results.get('_id') == rid\n assert 
results.get('_shards', {}).get('successful') == 1", "def test_metering_database(self):\n self.with_deletion = self.input.param(\"delete\", False)\n self.db_name = \"%s-testmetering\" % self.db_name\n # validate initial throughput is 5000/3 = 1666\n for bucket in self.cluster.buckets:\n print(bucket.servers)\n self.assertEqual(self.bucket_util.get_throttle_limit(bucket),\n self.bucket_throttling_limit)\n\n # validate create, update, delete stat\n for op_type in [\"create\", \"update\"]:\n if op_type == \"create\":\n self.load_data(create_start=0, create_end=self.num_items, create_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if op_type == \"update\":\n self.load_data(update_start=0, update_end=self.num_items, update_perc=100, mutated=1)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if self.with_deletion:\n self.log.info(\"performing delete operation\")\n self.load_data(delete_start=0, delete_end=self.num_items, delete_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)", "def on_L3(self):\r\n self.log()", "def send_logs(self):\n for i in range(30):\n with open('{}-{}.log'.format(self._log_file_path, i), 'a') as log_file:\n for _ in range(self._log_rate):\n log_file.write(self._log_record + '\\n')", "def fetch_data_logger(fn):\n @functools.wraps(fn)\n def wrapper():\n\n data = fn()\n\n create_or_update_log(data, LOG_TYPES['D'])\n\n return data\n return wrapper", "def list_log_files(fs, devices, start_times, verbose=True, passwords={}):\n import canedge_browser\n\n log_files = []\n\n if len(start_times):\n for idx, device in enumerate(devices):\n start = start_times[idx]\n log_files_device = canedge_browser.get_log_files(fs, [device], start_date=start, passwords=passwords)\n log_files.extend(log_files_device)\n\n if verbose:\n print(f\"Found {len(log_files)} log files\\n\")\n\n return log_files", "def main():\n global tar_file_descr\n\n help_msg = 'Usage: log_collector.py <all | host1[,host2,host3...]>'\n hosts = []\n if len(sys.argv) == 2:\n if '-h' == sys.argv[1] or '--help' == sys.argv[1]:\n print(help_msg)\n sys.exit(0)\n elif 'all' == sys.argv[1]:\n # get logs from all hosts\n hosts = []\n host_objs = CLIENT.host_get_all()\n for host_obj in host_objs:\n hosts.append(host_obj.name)\n else:\n # get logs from specified hosts\n hostnames = sys.argv[1].split(',')\n for host in hostnames:\n if host not in hosts:\n hosts.append(host)\n else:\n print(help_msg)\n sys.exit(1)\n\n # open tar file for storing logs\n fd, tar_path = tempfile.mkstemp(prefix='kolla_support_logs_',\n suffix='.tgz')\n os.close(fd) # avoid fd leak\n\n with tarfile.open(tar_path, 'w:gz') as tar_file_descr:\n # clear out old logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n os.mkdir(LOGDIR)\n\n # gather logs from selected hosts\n try:\n for host in hosts:\n get_logs_from_host(host)\n\n # tar up all the container logs\n tar_file_descr.add(LOGDIR, arcname='container_logs')\n finally:\n # remove uncompressed logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n\n # gather dump output from kolla-cli\n dump_kolla_info()\n\n print('Log collection complete. 
Logs are at %s' % tar_path)", "def log_store(self) -> Optional[str]:\n return pulumi.get(self, \"log_store\")", "def __enable_log_streaming():\n # Enable BigQuery service for the project:\n run_command('gcloud services enable bigquery --project {}'.format(PROJECT_ID))\n\n # Create the BigQuery dataset to store the logs:\n # For details refer to https://cloud.google.com/bigquery/docs/datasets#bigquery-create-dataset-cli\n run_command('bq mk --data_location {} --description \\\"Cloud logging export.\\\" {}'\n .format(LOGS_LOCATION, LOGS_SINK_DATASET_ID), 'already exists')\n\n # Set up a log sink to the above-created BigQuery dataset:\n output_message = run_command('gcloud logging sinks create {} {} --project {} \\\n --log-filter=\\'resource.type=\\\"gcs_bucket\\\" OR resource.type=\\\"project\\\"\\''\n .format(LOGS_SINK_NAME, LOGS_SINK_DESTINATION, PROJECT_ID))\n\n # The service account that will be writing to BQ dataset is listed by the previous command.\n # After extracting the service account from the message, you need to give it BQ Writer role to that service account.\n __set_dataset_access( __find_service_account_in_message(output_message) )", "def get_storage_info(self, address: str, level: StorageLevel):", "def local(self, text):\n\t\tlogf = open(\"update_log.txt\", \"a\")\n\t\tdate = datetime.datetime.now()\n\t\tlogf.writelines(\"[\" + str(date) + \"] \" + text + \"\\n\")\n\t\tlogf.close()", "def log_it(logdata=None):\n with open(\"bloomhack.log\", \"a\") as fp:\n fp.write(logdata)\n return", "def node_storage(self, node):\n def listdir(path, only_dirs=False):\n ents = node.account.sftp_client.listdir(path)\n if not only_dirs:\n return ents\n paths = map(lambda fn: (fn, os.path.join(path, fn)), ents)\n return [p[0] for p in paths if node.account.isdir(p[1])]\n\n store = NodeStorage(RedpandaService.DATA_DIR)\n for ns in listdir(store.data_dir, True):\n if ns == '.coprocessor_offset_checkpoints':\n continue\n ns = store.add_namespace(ns, os.path.join(store.data_dir, ns))\n for topic in listdir(ns.path):\n topic = ns.add_topic(topic, os.path.join(ns.path, topic))\n for num in listdir(topic.path):\n partition = topic.add_partition(\n num, node, os.path.join(topic.path, num))\n partition.add_files(listdir(partition.path))\n return store", "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))", "def snapshot(snapshot_type, result_q, time_delta):", "def _log_process(ip_or_url,\n username,\n password,\n database,\n series_name,\n port=8086,\n polling_interval=1,\n retention_duration=MEASUREMENTS_RETENTION_DURATION,\n **tags):\n logger = _logger()\n logger.info('Logging GPU to Database {}'.format(ip_or_url))\n\n kwargs = {'series_name': series_name,\n 'polling_interval': polling_interval,\n 'retention_duration': retention_duration}\n kwargs.update(tags)\n p = Process(target=_start_logger_process,\n args=(ip_or_url,\n port,\n username,\n password,\n database),\n kwargs=kwargs)\n p.start()\n yield p\n p.terminate()\n p.join()", "def test_start_new_log(self):\n old_log_file_name = self.device.log_file_name\n 
self.device.start_new_log(log_name_prefix=self.get_log_suffix())\n self.assertNotEqual(\n old_log_file_name, self.device.log_file_name,\n f\"Expected log file name to change from {old_log_file_name}\")\n self.assertTrue(\n os.path.exists(old_log_file_name),\n f\"Expected old log file name {old_log_file_name} to exist\")\n self.assertTrue(\n os.path.exists(self.device.log_file_name),\n f\"Expected new log file name {self.device.log_file_name} to exist\")" ]
[ "0.6248957", "0.6004453", "0.53603053", "0.53381205", "0.5311462", "0.5291663", "0.5282635", "0.5279799", "0.52787983", "0.5260151", "0.52150124", "0.5205798", "0.5137791", "0.5104536", "0.5094829", "0.5094829", "0.5089645", "0.5080881", "0.5069531", "0.5067983", "0.5049315", "0.5047344", "0.50428", "0.50336874", "0.5028792", "0.5028792", "0.50232273", "0.5017229", "0.50001866", "0.4975056", "0.49695352", "0.49651346", "0.4959732", "0.49485096", "0.49201605", "0.491775", "0.4906063", "0.4901353", "0.48973086", "0.48958024", "0.48914644", "0.48892727", "0.48823112", "0.48804468", "0.4877903", "0.48676512", "0.48655146", "0.48638356", "0.48596066", "0.48569125", "0.48556426", "0.48419136", "0.48412684", "0.48380032", "0.48372665", "0.48348084", "0.48249245", "0.48086706", "0.4807052", "0.4804408", "0.48027894", "0.47964686", "0.47765866", "0.4768357", "0.475868", "0.47576597", "0.474328", "0.4735613", "0.47350103", "0.47318107", "0.47237858", "0.47210956", "0.47131303", "0.47130087", "0.47083464", "0.47016108", "0.47003374", "0.4697505", "0.46934873", "0.46901193", "0.4685906", "0.46849328", "0.46755195", "0.46703953", "0.46671495", "0.4667117", "0.46545303", "0.46534565", "0.46448874", "0.46371803", "0.46359396", "0.4634748", "0.4631148", "0.46283913", "0.4627094", "0.46101946", "0.45972055", "0.45956996", "0.45937267", "0.45925382", "0.45902616" ]
0.0
-1
This operation is applicable only to general-purpose local-disk and dedicated local-disk instances. You can call this operation up to 30 times per minute. To call this operation at a higher frequency, use a Logstore. For more information, see [Manage a Logstore](~~48990~~).
async def describe_running_log_records_with_options_async( self, request: dds_20151201_models.DescribeRunningLogRecordsRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.DescribeRunningLogRecordsResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.dbname): query['DBName'] = request.dbname if not UtilClient.is_unset(request.end_time): query['EndTime'] = request.end_time if not UtilClient.is_unset(request.node_id): query['NodeId'] = request.node_id if not UtilClient.is_unset(request.order_type): query['OrderType'] = request.order_type if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.page_number): query['PageNumber'] = request.page_number if not UtilClient.is_unset(request.page_size): query['PageSize'] = request.page_size if not UtilClient.is_unset(request.resource_group_id): query['ResourceGroupId'] = request.resource_group_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.role_id): query['RoleId'] = request.role_id if not UtilClient.is_unset(request.role_type): query['RoleType'] = request.role_type if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token if not UtilClient.is_unset(request.start_time): query['StartTime'] = request.start_time req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='DescribeRunningLogRecords', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.DescribeRunningLogRecordsResponse(), await self.call_api_async(params, req, runtime) )
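The async method above is the SDK implementation for the DescribeRunningLogRecords operation described by the query. As a minimal usage sketch only, the snippet below shows how such a generated client method could be invoked; the client module path, endpoint, credentials, and instance ID are assumptions and not values taken from this dataset row.

import asyncio

from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_dds20151201.client import Client
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util import models as util_models


async def fetch_running_logs():
    # Assumed client configuration; credentials and endpoint are placeholders.
    config = open_api_models.Config(
        access_key_id='<access-key-id>',
        access_key_secret='<access-key-secret>',
        endpoint='mongodb.aliyuncs.com',
    )
    client = Client(config)
    # Request fields mirror the parameters handled by the method above.
    request = dds_20151201_models.DescribeRunningLogRecordsRequest(
        dbinstance_id='dds-bpxxxxxxxxxxxxxx',   # placeholder instance ID
        start_time='2023-01-01T00:00Z',
        end_time='2023-01-01T01:00Z',
        page_number=1,
        page_size=30,
    )
    runtime = util_models.RuntimeOptions()
    response = await client.describe_running_log_records_with_options_async(
        request, runtime)
    return response.body


if __name__ == '__main__':
    asyncio.run(fetch_running_logs())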
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def monitorStore():\n # commented to use psutil system info system_info = systeminfo.get_all_info()\n\n system_info = nodeinfo.node_all()\n system_info ['monitored_timestamp'] = config.get_current_system_timestamp()\n\n # Attach sliver info to system info\n system_info.update(sliverinfo.collectAllDataAPI())\n\n s = shelve.open('log_shelf.db', writeback = True)\n\n while(1):\n try:\n try:\n if s.has_key('current_seq_number'):\n #Update current sequence number\n s['current_seq_number']+= 1\n current_seq = s['current_seq_number']\n else:\n current_seq = 1\n s['current_seq_number']= current_seq\n\n print(\"writing to file: \" + str(current_seq))\n\n # print(\"writing to file\" + str(system_info))\n s[str(current_seq)]= system_info\n\n\n finally:\n s.close()\n break\n\n except OSError:\n # In some research devices, the underlying dbm has a bug which needs to be handled explicitly\n print(\"Exception caught while handling shelve file!! OS Error: file not found. Trying again in 1 second\")\n time.sleep(1)\n continue\n\n delete_seen_entries()", "def log(msg):\n\n print('datastore: %s' % msg)", "def disable_disk_logging():\r\n app.set_option(_DISK_LOG_LEVEL_OPTION, LogOptions._LOG_LEVEL_NONE_KEY, force=True)", "def store_stream(data: DataStream):\n if data:\n try:\n c1 = datetime.now()\n CC.save_datastream(data,\"datastream\")\n e1 = datetime.now()\n CC.save_datastream_to_influxdb(data)\n i1 = datetime.now()\n print(\"Cassandra Time: \", e1-c1, \" Influx Time: \",i1-e1, \" Batch size: \",len(data.data))\n except:\n cc_log()", "def test_Drive_log_count(opencl_env: cldrive_env.OpenCLEnvironment):\n logs = opencl_kernel_driver.Drive(\n \"\"\"\nkernel void A(global int* a, global int* b) {\n a[get_global_id(0)] += b[get_global_id(0)];\n}\n\"\"\",\n 16,\n 16,\n opencl_env,\n 5,\n )\n assert len(logs) == 5", "def disk():\n run(env.disk_usage_command % env)", "def record_used(kind, hash):\n if os.path.exists(LOG_FILEPATH):\n log = open(os.path.join(ROOT, 'used'), 'a')\n else:\n log = open(os.path.join(ROOT, 'used'), 'w+')\n\n log.writelines([\"%s...%s\\n\" % (kind, hash)])", "def syslog_local_file(handle, admin_state, name, severity=\"emergencies\",\n size=\"40000\"):\n\n from ucsmsdk.mometa.comm.CommSyslogFile import CommSyslogFile\n\n mo = CommSyslogFile(parent_mo_or_dn=\"sys/svc-ext/syslog\", size=size,\n admin_state=admin_state,\n name=name,\n severity=severity)\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n return mo", "def check_oplog(con, warning, critical, perf_data):\n warning = warning or 24 \n critical = critical or 4\n try:\n db = con.local\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) \n if (db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) != None) :\n oplog = \"oplog.rs\";\n else :\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"})\n if (db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"}) != None) :\n oplog = \"oplog.$main\";\n else :\n message = \"neither master/slave nor replica set replication detected\";\n return check_levels(None,warning,critical,message)\n\n try:\n set_read_preference(con.admin)\n data=con.local.command(pymongo.son_manipulator.SON([('collstats',oplog)]))\n except:\n data = con.admin.command(son.SON([('collstats',oplog)]))\n\n ol_size=data['size']\n ol_storage_size=data['storageSize']\n ol_used_storage=int(float(ol_size)/ol_storage_size*100+1)\n ol=con.local[oplog]\n firstc = ol.find().sort(\"$natural\",pymongo.ASCENDING).limit(1)[0]['ts']\n lastc = 
ol.find().sort(\"$natural\",pymongo.DESCENDING).limit(1)[0]['ts']\n time_in_oplog= (lastc.as_datetime()-firstc.as_datetime())\n message=\"Oplog saves \"+ str(time_in_oplog) + \" %d%% used\" %ol_used_storage \n try: #work starting from python2.7\n hours_in_oplog= time_in_oplog.total_seconds()/60/60\n except:\n hours_in_oplog= float(time_in_oplog.seconds + time_in_oplog.days * 24 * 3600)/60/60\n approx_level=hours_in_oplog*100/ol_used_storage\n message+=performance_data(perf_data,[(\"%.2f\" % hours_in_oplog,'oplog_time',warning,critical),(\"%.2f \" % approx_level, 'oplog_time_100_percent_used')])\n return check_levels(-approx_level,-warning,-critical,message)\n\n except Exception, e:\n return exit_with_general_critical(e)", "def disk_log_level():\r\n if LogOptions._DISK_LOG_LEVEL is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_LEVEL", "def get_full_log(self):\n return self._get_log('full')", "def _record_storage_event(metric, value=0):\n command_name = properties.VALUES.metrics.command_name.Get()\n metrics.CustomKeyValue(command_name, 'Storage-' + metric, value)", "def get_logdb_quota():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<show><system><logdb-quota></logdb-quota></system></show>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def test_memory_pressure(self):\n self.execute_query(self.query)\n # This triggers a full GC as of openjdk 1.8.\n call([\"jmap\", \"-histo:live\", str(self.cluster.catalogd.get_pid())])\n # Sleep for logbufsecs=5 seconds to wait for the log to be flushed. Wait 5 more\n # seconds to reduce flakiness.\n time.sleep(10)\n assert self.metadata_cache_string not in self._get_catalog_object()", "def getLogs():", "def getLogs():", "def InsertLog():", "def GetLogs(self):\n raise NotImplementedError()", "def logmem(cls, tag):\n\n cls.logger.info(\"----%s----\", str(tag))\n mem = psutil.virtual_memory()\n cls.logger.info(\"total:%s M\", mem.total / 1024 / 1024)\n cls.logger.info(\"available:%s M\", mem.available / 1024 / 1024)\n cls.logger.info(\"used:%s M\", mem.used / 1024 / 1024)\n cls.logger.info(\"free:%s M\", mem.free / 1024 / 1024)\n cls.logger.info(\"active:%s M\", mem.active / 1024 / 1024)\n cls.logger.info(\"inactive:%s M\", mem.inactive / 1024 / 1024)\n cls.logger.info(\"buffers:%s M\", mem.buffers / 1024 / 1024)\n cls.logger.info(\"cached:%s M\", mem.cached / 1024 / 1024)\n cls.logger.info(\"shared:%s M\", mem.shared / 1024 / 1024)", "def upload_crashes(self, name, directory):\n logging.info('Not uploading crashes because no Filestore.')", "def logs(lines=50):\n run(f'journalctl --lines {lines} --unit addok --follow')", "def log(self, message):\n try:\n stat = os.stat(self.logpath)\n if stat.st_size >= 1000000:\n os.rename(self.logpath, self.logpath + '.1')\n except:\n pass\n logfile = open(self.logpath, 'a+')\n logfile.write(message + \"\\n\")\n logfile.close()", "def logs(name, namespace, region, start_time, end_time, offset, failed, tail):\n\n start, end = _align_time(start_time, end_time, offset, tail)\n client = ScfLogClient(name, namespace, region, failed)\n client.fetch_log(start, end, tail)", "def objectLogger(params):\n mode = params['mode']\n maxlen = params['maxlen']\n file_format = params['format']\n home_dir = params['storage_dir']\n server = Listener((params['ip'], int(params['port'])))\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n while True:\n conn = server.accept()\n while True:\n try:\n data 
= conn.recv()\n except EOFError:\n break\n if data:\n storage.append(data)\n else:\n storage.sync(id_generator())\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n conn.close()", "def _load_disk(self):", "def _load_disk(self):", "def redire_spark_logs(bigdl_type=\"float\", log_path=os.getcwd() + \"/bigdl.log\"):\n callBigDlFunc(bigdl_type, \"redirectSparkLogs\", log_path)", "def get_device_log(self, client):\r\n file_path = client.getDeviceLog()\r\n logging.info(str(time.asctime(time.localtime())) + \" Device Logs collected !! \" + file_path)\r\n #self.logger.info(\"Device logs collected at %s\" % file_path)\r\n return file_path", "def logs(self, container: Container) -> str:", "def log_store(self) -> str:\n return pulumi.get(self, \"log_store\")", "def on_sync(self):\r\n self.log()", "def collect_storage_metrics(sys):\n try:\n session = get_session()\n client = InfluxDBClient(host=INFLUXDB_HOSTNAME, port=INFLUXDB_PORT, database=INFLUXDB_DATABASE)\n\n sys_id = sys[\"id\"]\n sys_name = get_system_name(sys)\n\n json_body = list()\n\n # Get Drive statistics\n drive_stats_list = session.get((\"{}/{}/analysed-drive-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n drive_locations = get_drive_location(sys_id, session)\n if CMD.showDriveNames:\n for stats in drive_stats_list:\n location_send = drive_locations.get(stats[\"diskId\"])\n LOG.info((\"Tray{:02.0f}, Slot{:03.0f}\").format(location_send[0], location_send[1]))\n # Add Drive statistics to json body\n for stats in drive_stats_list:\n disk_location_info = drive_locations.get(stats[\"diskId\"])\n disk_item = dict(\n measurement = \"disks\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n sys_tray = (\"{:02.0f}\").format(disk_location_info[0]),\n sys_tray_slot = (\"{:03.0f}\").format(disk_location_info[1])\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in DRIVE_PARAMS\n )\n )\n if CMD.showDriveMetrics:\n LOG.info(\"Drive payload: %s\", disk_item)\n json_body.append(disk_item)\n\n # Get interface statistics\n interface_stats_list = session.get((\"{}/{}/analysed-interface-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showInterfaceNames:\n for stats in interface_stats_list:\n LOG.info(stats[\"interfaceId\"])\n # Add interface statistics to json body\n for stats in interface_stats_list:\n if_item = dict(\n measurement = \"interface\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n interface_id = stats[\"interfaceId\"],\n channel_type = stats[\"channelType\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in INTERFACE_PARAMS\n )\n )\n if CMD.showInterfaceMetrics:\n LOG.info(\"Interface payload: %s\", if_item)\n json_body.append(if_item)\n\n # Get System statistics\n system_stats_list = session.get((\"{}/{}/analysed-system-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n # Add System statistics to json body\n sys_item = dict(\n measurement = \"systems\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name\n ),\n fields = dict(\n (metric, system_stats_list.get(metric)) for metric in SYSTEM_PARAMS\n )\n )\n if CMD.showSystemMetrics:\n LOG.info(\"System payload: %s\", sys_item)\n json_body.append(sys_item)\n \n # Get Volume statistics\n volume_stats_list = session.get((\"{}/{}/analysed-volume-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showVolumeNames:\n for stats in volume_stats_list:\n LOG.info(stats[\"volumeName\"])\n # Add Volume statistics to json body\n for stats in 
volume_stats_list:\n vol_item = dict(\n measurement = \"volumes\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n vol_name = stats[\"volumeName\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in VOLUME_PARAMS\n )\n )\n if CMD.showVolumeMetrics:\n LOG.info(\"Volume payload: %s\", vol_item)\n json_body.append(vol_item)\n\n if not CMD.doNotPost:\n client.write_points(json_body, database=INFLUXDB_DATABASE, time_precision=\"s\")\n\n except RuntimeError:\n LOG.error((\"Error when attempting to post statistics for {}/{}\").format(sys[\"name\"], sys[\"id\"]))", "def reset_used():\n with open(LOG_FILEPATH, 'w+') as logfile:\n pass", "def docker_logs():\n local('docker logs {} -f'.format(project_name))", "def test_slf_notastring():\n oldlogfile = get_logfile()\n start_logfile(1.0)\n start_logfile(True)\n set_logfile(oldlogfile)", "def set_disk_log_level(log_level):\r\n LogOptions._DISK_LOG_SCHEME, LogOptions._DISK_LOG_LEVEL = (\r\n LogOptions._parse_loglevel(log_level, scheme='google'))", "def test_slf_basic():\n oldlogfile = get_logfile()\n with tempfile.TemporaryDirectory() as tmp:\n logfile = os.path.join(tmp, 'log.txt')\n assert ~os.path.exists(logfile)\n start_logfile(logfile)\n assert os.path.exists(logfile)\n set_logfile(oldlogfile)", "def _performance_log(func):\n\n def wrapper(*arg):\n \"\"\" wrapper \"\"\"\n\n start = datetime.datetime.now()\n\n # Code execution\n res = func(*arg)\n\n if _log_performance:\n usage = resource.getrusage(resource.RUSAGE_SELF)\n memory_process = (usage[2])/1000\n\n delta = datetime.datetime.now() - start\n delta_milliseconds = int(delta.total_seconds() * 1000)\n\n _logger.info(\"PERFORMANCE - {0} - milliseconds |{1:>8,}| - memory MB |{2:>8,}|\"\n .format(func.__name__,\n delta_milliseconds,\n memory_process))\n\n return res\n\n return wrapper", "def log(length, file):\n\n if user_init.check_pre_init() and user_utility.check_drive_init() == 'True':\n \n data = user_utility.read_log(length, file)\n\n for log in data:\n print(log)\n\n\n else:\n user_utility.print_error(\"Sink folder not Found! 
Initialise folder first or reset your configuration!\")", "def logprllwrite(self):\n sql = '''select to_char(time_waited, 'FM99999999999999990') retvalue \n from v$system_event se, v$event_name en where se.event(+) \n = en.name and en.name = 'log files parallel write' '''\n self.cur.execute(sql)\n res = self.cur.fetchall()\n for i in res:\n print(i[0])", "def init_log():\n log_op = LogOp()\n log_op.config_log(logging.DEBUG)", "def logManager(self):\n time.sleep(0.1)\n while True:\n try:\n time.sleep(0.2)\n data = self.logQ.get(block=False)\n except Queue.Empty:\n pass\n else:\n try:\n self.log_lock.acquire() \n self.log_file.logEntry(data)\n time.sleep(0.1)\n self.log_lock.release()\n except:\n print '*Unable to write to log file*'", "def _load_disk(self):\r\n pass", "def startLogFiles():\n #global techLog\n global userl\n _generic_startLogFiles(True)", "def log_free_disk_space():\n cmd = 'df -h'\n p = Popen(cmd, shell=True, stdout=PIPE)\n res = p.communicate()\n if res[0]:\n res = res[0]\n else:\n res = res[1]\n logger.warning('Disk usage statisticks:')\n logger.warning(res)", "def test02StoreRefresh(self):\n s = utils.FastStore(max_size=5)\n keys = []\n for i in range(0, 5):\n keys.append(s.Put(i, i))\n\n # This should not raise because keys[0] should be refreshed each time its\n # gotten\n for i in range(0, 1000):\n s.Get(keys[0])\n s.Put(i, i)", "def log_time(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n start = time.time()\n result = func(*args, **kwargs)\n end = time.time()\n f = open(\"log_dev.txt\",'a',encoding=\"utf8\")\n time_res = end - start\n f.write(\"\\n\"+func.__name__+ \" time = \" + str(time_res))\n return result\n\n return wrapper", "def checkLogs():\n run(\"cat /etc/chariot/logs\")#not sure if this is cat-able", "def store(**kwargs):\r\n stored = AppLog(**kwargs)\r\n DBSession.add(stored)", "def test_compute_persistence(perfectModelEnsemble_initialized_control):\n perfectModelEnsemble_initialized_control._compute_persistence(metric=\"acc\")", "def collect_logs(self, log_dir, label=None, min_t=100, max_t=200):\n pass", "def test_backup_with_update_on_disk_of_snapshot_markers(self):\n version = RestConnection(self.backupset.backup_host).get_nodes_version()\n if version[:5] == \"6.5.0\":\n self.log.info(\"\\n\\n******* Due to issue in MB-36904, \\\n \\nthis test will be skipped in 6.5.0 ********\\n\")\n return\n gen1 = BlobGenerator(\"ent-backup1\", \"ent-backup-\", self.value_size, end=100000)\n gen2 = BlobGenerator(\"ent-backup2\", \"ent-backup-\", self.value_size, end=100000)\n gen3 = BlobGenerator(\"ent-backup3\", \"ent-backup-\", self.value_size, end=100000)\n rest_conn = RestConnection(self.backupset.cluster_host)\n rest_conn.create_bucket(bucket=\"bucket0\", ramQuotaMB=1024)\n self.buckets = rest_conn.get_buckets()\n authentication = \"-u Administrator -p password\"\n\n self._load_all_buckets(self.master, gen1, \"create\", 0)\n self.log.info(\"Stop persistent\")\n cluster_nodes = rest_conn.get_nodes()\n clusters = copy.deepcopy(cluster_nodes)\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n for node in clusters:\n shell.execute_command(\"%scbepctl%s %s:11210 -b %s stop %s\" % \\\n (self.cli_command_location,\n self.cmd_ext,\n node.ip,\n \"bucket0\",\n authentication))\n shell.disconnect()\n self.log.info(\"Load 2nd batch docs\")\n self._load_all_buckets(self.master, gen2, \"create\", 0)\n self.log.info(\"Run full backup with cbbackupwrapper\")\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n backup_dir = 
self.tmp_path + \"backup\" + self.master.ip\n shell.execute_command(\"rm -rf %s\" % backup_dir)\n shell.execute_command(\"mkdir %s\" % backup_dir)\n shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m full %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n self.log.info(\"Load 3rd batch docs\")\n self._load_all_buckets(self.master, gen3, \"create\", 0)\n self.log.info(\"Run diff backup with cbbackupwrapper\")\n output, _ = shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m diff %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n\n if output and \"SUCCESSFULLY COMPLETED\" not in output[1]:\n self.fail(\"Failed to backup as the fix in MB-25727\")\n shell.disconnect()", "def test_errors_on_log(self):\n mb = self.maria_backup\n self.assertFalse(mb.errors_on_log()) # at the moment, xtrabackup execution does not generate disk logs", "def add_log(self, log):\n log = str(datetime.datetime.now()) + \": \"+log+\"\\n\"\n print(log)\n self.logs.append(log)\n if len(self.logs) > 10:\n self.append_to_logfile()", "def log_scalar(name, value, step, autolog):\n if not autolog:\n mlflow.log_metric(name, value)", "def recordLogsToList(log):\n print log\n# global LOGLIST\n LOGLIST.append(log)", "def on_L1(self):\r\n self.log()", "def on_L2(self):\r\n self.log()", "def log_all(self):\n self.save_raw()\n self.log()", "def logs_directory(self):", "def test_log_links(self):\n self.host = synthetic_host(\"myserver-with-nids\", [Nid.Nid(\"192.168.0.1\", \"tcp\", 0)])\n self.create_simple_filesystem(self.host)\n fake_log_message(\"192.168.0.1@tcp testfs-MDT0000\")\n response = self.api_client.get(\"/api/log/\")\n (event,) = self.deserialize(response)[\"objects\"]\n self.assertEqual(len(event[\"substitutions\"]), 2)\n self.host.state = \"removed\"\n self.host.save()\n self.mdt.not_deleted = False\n self.mdt.save()\n response = self.api_client.get(\"/api/log/\")\n (event,) = self.deserialize(response)[\"objects\"]\n self.assertEqual(len(event[\"substitutions\"]), 0)", "def log(string):\n\n print string\n\n# data and time\n dt = datetime.now().strftime(\"%b %d %H:%M:%S\")\n\n# check if log file exist / if not create it\n check_logf = os.path.isfile(logfile)\n if check_logf == False:\n os.system(\"touch %s\" % (logfile))\n firstlog = \"%s %s jadm: jadm log file was created!\" % (dt, os.uname()[1])\n os.system(\"echo '%s' > %s\" % (firstlog, logfile))\n\n# applay string to log file\n string = \"%s %s jadm: %s\" % (dt, os.uname()[1], string)\n os.system(\"echo '%s' >> %s\" % (string, logfile))", "def test_data_store_local_index(self, tcex: TcEx):\n tcex.api.tc.v2.datastore('local', self.data_type)", "def createLogicalVolume(self, vg, filesystem, name, mountpoint, size):\n lv = {}\n lv['command'] = 'create:logvol'\n lv['vg'] = vg\n lv['fs'] = filesystem\n lv['size'] = size - EXTENT_SIZE + 1\n lv['name'] = name\n lv['mountPoint'] = mountpoint\n lv['format'] = 'yes'\n\n return lv", "def test_cbbackupmgr_collect_logs(self):\n if \"5.5\" > self.cb_version[:3]:\n self.fail(\"This test is only for cb version 5.5 and later. 
\")\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster()\n self._collect_logs()", "def disk_log_scheme():\r\n if LogOptions._DISK_LOG_SCHEME is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_SCHEME", "def log_runtime(label, mean_time, std, instances):\n pass", "def do_disk_event(client, args):\n args.type = 'disk'\n do_event_show(client, args)", "def show_bigdl_info_logs(bigdl_type=\"float\"):\n callBigDlFunc(bigdl_type, \"showBigDlInfoLogs\")", "def logs(self, **options):\n cmd = self.get_scalingo_prefix_cmd()\n cmd += \" logs -f\"\n for name, val in options.items():\n if len(name) == 1:\n cmd += f\" -{name}\"\n else:\n cmd += f\" --{name}\"\n if val:\n cmd += f\" {val}\"\n if self.env_name == \"local\":\n raise ValueError(\"No logs on local environment\")\n else:\n asyncio.run(self.async_run(cmd))", "def store_data(batch_df: DataFrame, output_delta_lake_path):\n batch_df.select(col(\"MeterId\"),\n col(\"SupplierId\"),\n col(\"Measurement\"),\n col(\"ObservationTime\")) \\\n .repartition(\"SupplierId\") \\\n .write \\\n .partitionBy(\"SupplierId\") \\\n .format(\"delta\") \\\n .mode(\"append\") \\\n .save(output_delta_lake_path)", "def logentry(jobid, label, typ, content=None, path=None):\n ud = str(uuid.uuid4())\n db.logs.save({\"uuid\":ud, \"jobid\":jobid, \"label\":label, \"type\":typ, \"content\":content, \"date\":tstamp(), \"containerurl\":path})", "def load(lb_id, backend='memory'):\n return b_api.fetch(backend).logbook_get(lb_id)", "def dump_to_log(self):\n # self._send_request(\"/dumpToLog\")\n pass", "def store(status, message, **kwargs):\r\n LogRecord(status, message, **kwargs)", "def log_mounts_info(mounts):\n if isinstance(mounts, str):\n mounts = [mounts]\n\n g.log.info(\"Start logging mounts information:\")\n for mount_obj in mounts:\n g.log.info(\"Information of mount %s:%s\", mount_obj.client_system,\n mount_obj.mountpoint)\n # Mount Info\n g.log.info(\"Look For Mountpoint:\\n\")\n cmd = \"mount | grep %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Disk Space Usage\n g.log.info(\"Disk Space Usage Of Mountpoint:\\n\")\n cmd = \"df -h %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Long list the mountpoint\n g.log.info(\"List Mountpoint Entries:\\n\")\n cmd = \"ls -ld %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Stat mountpoint\n g.log.info(\"Mountpoint Status:\\n\")\n cmd = \"stat %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)", "def putlog(self,s):\n if not self.logqueue == None:\n# print s\n self.logqueue.put(\"Spectrum: (\"+time.ctime()+\"):\\n\"+s)", "def set_size_based_rotating_log(log_path=LOGGER_PATH, log_name=LOGGER_FILENAME):\n\n # Checks if the path exists otherwise tries to create it\n if not os.path.exists(log_path):\n try:\n os.makedirs(log_path)\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n print(exc_type, exc_value, exc_traceback)\n\n # Sets the format and level of logging records\n format = '%(asctime)s - %(levelname)s - [%(pathname)s:%(lineno)s - %(funcName)10s() ] - %(message)s'\n formatter = Formatter(format)\n level=logging.DEBUG\n\n # Does basic configuration for the logging system\n logging.basicConfig(format=format, level=level)\n\n # Instantiates a logger object\n logger 
= logging.getLogger(\"\")\n\n handler = logging.handlers.RotatingFileHandler(filename=log_path+'/'+log_name,\n maxBytes=LOGGER_MAX_SIZE,\n backupCount=LOGGER_MAX_FILES)\n handler.setFormatter(formatter)\n handler.rotator = rotator\n handler.namer = namer\n logger.addHandler(handler)\n return logger", "def test_logging_log_rotate_for_mysql(\n self,\n admin_node,\n k8s_actions,\n show_step,\n os_deployed):\n logger.info('Log rotate for mysql')\n log_path = '/var/log/ccp/mysql/'\n log_file = 'mysql.log'\n\n show_step(1)\n # get cron pod\n cron_pod = [pod for pod in k8s_actions.api.pods.list(\n namespace=ext.Namespace.BASE_NAMESPACE)\n if 'cron-' in pod.name][0]\n # clean files\n utils.rm_files(admin_node, cron_pod, log_path + log_file + '*')\n\n show_step(2)\n for day in range(0, 8):\n utils.create_file(admin_node, cron_pod, log_path + log_file, 110)\n utils.run_daily_cron(admin_node, cron_pod, 'logrotate')\n sleep(5)\n\n show_step(3)\n log_files = utils.list_files(\n admin_node, cron_pod, log_path, log_file + '*')\n assert len(log_files) == 7,\\\n \"Count of log files after rotation is wrong. \" \\\n \"Expected {} Actual {}\".format(log_files, 7)", "def log(cmdDict, kind, error=None):\n\n cmdDict['timestamp'] = str(int(time.time() * 1000))\n cmdDict['logType'] = kind\n cmdDict['_id'] = cmdDict['transactionNumber'] + cmdDict['user'] + cmdDict['command'] + cmdDict['timestamp'] + cmdDict['logType']\n\n if (error != None):\n cmdDict['errorMessage'] = error\n\n try:\n Database.insert(TRANSACT_COLLECT, cmdDict)\n\n except pymongo.errors.PyMongoError as err:\n print(f\"ERROR! Could not log command into database. Failed with: {err}\")", "def upload_logs():\n return {\n 'page': 'upload_logs',\n 'raw_logs': '',\n }", "def logMemoryStats():\n class MemoryStatusEx(ctypes.Structure):\n \"\"\" MEMORYSTATUSEX \"\"\"\n kaFields = [\n ( 'dwLength', ctypes.c_ulong ),\n ( 'dwMemoryLoad', ctypes.c_ulong ),\n ( 'ullTotalPhys', ctypes.c_ulonglong ),\n ( 'ullAvailPhys', ctypes.c_ulonglong ),\n ( 'ullTotalPageFile', ctypes.c_ulonglong ),\n ( 'ullAvailPageFile', ctypes.c_ulonglong ),\n ( 'ullTotalVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailExtendedVirtual', ctypes.c_ulonglong ),\n ];\n _fields_ = kaFields; # pylint: disable=invalid-name\n\n def __init__(self):\n super(MemoryStatusEx, self).__init__();\n self.dwLength = ctypes.sizeof(self);\n\n try:\n oStats = MemoryStatusEx();\n ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(oStats));\n except:\n reporter.logXcpt();\n return False;\n\n reporter.log('Memory statistics:');\n for sField, _ in MemoryStatusEx.kaFields:\n reporter.log(' %32s: %s' % (sField, getattr(oStats, sField)));\n return True;", "def get_dismount_mount_record(r, start_time):\n retv = {'state': 'M',\n 'node': r['node'],\n 'volume': r['volume'],\n 'type': r['type'],\n 'logname': r['logname'],\n 'start': start_time,\n 'finish': timeutil.wind_time(start_time, seconds=20, backward=False),\n 'storage_group': 'PROBE_UTILITY',\n 'reads': 0,\n 'writes':0\n }\n return retv", "def azure_fetch_and_push_to_influx(token, azure_data_store, file_path, tag_map, influx_settings):\n df = read_df_from_azure(azure_data_store, file_path, token)\n (host, port, user, password, db_name, batch_size) = influx_settings\n #df=df.resample('1Min').mean()\n write_to_influx(df, tag_map, host, port, user, password, db_name, batch_size)", "def test_metering_database(self):\n self.with_deletion = self.input.param(\"delete\", False)\n self.db_name = \"%s-testmetering\" % 
self.db_name\n # validate initial throughput is 5000/3 = 1666\n for bucket in self.cluster.buckets:\n print(bucket.servers)\n self.assertEqual(self.bucket_util.get_throttle_limit(bucket),\n self.bucket_throttling_limit)\n\n # validate create, update, delete stat\n for op_type in [\"create\", \"update\"]:\n if op_type == \"create\":\n self.load_data(create_start=0, create_end=self.num_items, create_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if op_type == \"update\":\n self.load_data(update_start=0, update_end=self.num_items, update_perc=100, mutated=1)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if self.with_deletion:\n self.log.info(\"performing delete operation\")\n self.load_data(delete_start=0, delete_end=self.num_items, delete_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)", "def test_data_store_local_new_index(tcex: TcEx):\n data = {'one': 1}\n key = str(uuid.uuid4())\n rid = 'one'\n\n ds = tcex.api.tc.v2.datastore('local', key)\n\n results = ds.add(rid=rid, data=data)\n if results is None:\n assert False, 'datastore add returned None'\n assert results.get('_id') == rid\n assert results.get('_shards', {}).get('successful') == 1", "def send_logs(self):\n for i in range(30):\n with open('{}-{}.log'.format(self._log_file_path, i), 'a') as log_file:\n for _ in range(self._log_rate):\n log_file.write(self._log_record + '\\n')", "def on_L3(self):\r\n self.log()", "def fetch_data_logger(fn):\n @functools.wraps(fn)\n def wrapper():\n\n data = fn()\n\n create_or_update_log(data, LOG_TYPES['D'])\n\n return data\n return wrapper", "def list_log_files(fs, devices, start_times, verbose=True, passwords={}):\n import canedge_browser\n\n log_files = []\n\n if len(start_times):\n for idx, device in enumerate(devices):\n start = start_times[idx]\n log_files_device = canedge_browser.get_log_files(fs, [device], start_date=start, passwords=passwords)\n log_files.extend(log_files_device)\n\n if verbose:\n print(f\"Found {len(log_files)} log files\\n\")\n\n return log_files", "def main():\n global tar_file_descr\n\n help_msg = 'Usage: log_collector.py <all | host1[,host2,host3...]>'\n hosts = []\n if len(sys.argv) == 2:\n if '-h' == sys.argv[1] or '--help' == sys.argv[1]:\n print(help_msg)\n sys.exit(0)\n elif 'all' == sys.argv[1]:\n # get logs from all hosts\n hosts = []\n host_objs = CLIENT.host_get_all()\n for host_obj in host_objs:\n hosts.append(host_obj.name)\n else:\n # get logs from specified hosts\n hostnames = sys.argv[1].split(',')\n for host in hostnames:\n if host not in hosts:\n hosts.append(host)\n else:\n print(help_msg)\n sys.exit(1)\n\n # open tar file for storing logs\n fd, tar_path = tempfile.mkstemp(prefix='kolla_support_logs_',\n suffix='.tgz')\n os.close(fd) # avoid fd leak\n\n with tarfile.open(tar_path, 'w:gz') as tar_file_descr:\n # clear out old logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n os.mkdir(LOGDIR)\n\n # gather logs from selected hosts\n try:\n for host in hosts:\n get_logs_from_host(host)\n\n # tar up all the container logs\n tar_file_descr.add(LOGDIR, arcname='container_logs')\n finally:\n # remove uncompressed logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n\n # gather dump output from kolla-cli\n dump_kolla_info()\n\n print('Log collection complete. 
Logs are at %s' % tar_path)", "def log_store(self) -> Optional[str]:\n return pulumi.get(self, \"log_store\")", "def __enable_log_streaming():\n # Enable BigQuery service for the project:\n run_command('gcloud services enable bigquery --project {}'.format(PROJECT_ID))\n\n # Create the BigQuery dataset to store the logs:\n # For details refer to https://cloud.google.com/bigquery/docs/datasets#bigquery-create-dataset-cli\n run_command('bq mk --data_location {} --description \\\"Cloud logging export.\\\" {}'\n .format(LOGS_LOCATION, LOGS_SINK_DATASET_ID), 'already exists')\n\n # Set up a log sink to the above-created BigQuery dataset:\n output_message = run_command('gcloud logging sinks create {} {} --project {} \\\n --log-filter=\\'resource.type=\\\"gcs_bucket\\\" OR resource.type=\\\"project\\\"\\''\n .format(LOGS_SINK_NAME, LOGS_SINK_DESTINATION, PROJECT_ID))\n\n # The service account that will be writing to BQ dataset is listed by the previous command.\n # After extracting the service account from the message, you need to give it BQ Writer role to that service account.\n __set_dataset_access( __find_service_account_in_message(output_message) )", "def get_storage_info(self, address: str, level: StorageLevel):", "def local(self, text):\n\t\tlogf = open(\"update_log.txt\", \"a\")\n\t\tdate = datetime.datetime.now()\n\t\tlogf.writelines(\"[\" + str(date) + \"] \" + text + \"\\n\")\n\t\tlogf.close()", "def log_it(logdata=None):\n with open(\"bloomhack.log\", \"a\") as fp:\n fp.write(logdata)\n return", "def node_storage(self, node):\n def listdir(path, only_dirs=False):\n ents = node.account.sftp_client.listdir(path)\n if not only_dirs:\n return ents\n paths = map(lambda fn: (fn, os.path.join(path, fn)), ents)\n return [p[0] for p in paths if node.account.isdir(p[1])]\n\n store = NodeStorage(RedpandaService.DATA_DIR)\n for ns in listdir(store.data_dir, True):\n if ns == '.coprocessor_offset_checkpoints':\n continue\n ns = store.add_namespace(ns, os.path.join(store.data_dir, ns))\n for topic in listdir(ns.path):\n topic = ns.add_topic(topic, os.path.join(ns.path, topic))\n for num in listdir(topic.path):\n partition = topic.add_partition(\n num, node, os.path.join(topic.path, num))\n partition.add_files(listdir(partition.path))\n return store", "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))", "def snapshot(snapshot_type, result_q, time_delta):", "def _log_process(ip_or_url,\n username,\n password,\n database,\n series_name,\n port=8086,\n polling_interval=1,\n retention_duration=MEASUREMENTS_RETENTION_DURATION,\n **tags):\n logger = _logger()\n logger.info('Logging GPU to Database {}'.format(ip_or_url))\n\n kwargs = {'series_name': series_name,\n 'polling_interval': polling_interval,\n 'retention_duration': retention_duration}\n kwargs.update(tags)\n p = Process(target=_start_logger_process,\n args=(ip_or_url,\n port,\n username,\n password,\n database),\n kwargs=kwargs)\n p.start()\n yield p\n p.terminate()\n p.join()", "def test_start_new_log(self):\n old_log_file_name = self.device.log_file_name\n 
self.device.start_new_log(log_name_prefix=self.get_log_suffix())\n self.assertNotEqual(\n old_log_file_name, self.device.log_file_name,\n f\"Expected log file name to change from {old_log_file_name}\")\n self.assertTrue(\n os.path.exists(old_log_file_name),\n f\"Expected old log file name {old_log_file_name} to exist\")\n self.assertTrue(\n os.path.exists(self.device.log_file_name),\n f\"Expected new log file name {self.device.log_file_name} to exist\")" ]
[ "0.6249917", "0.600348", "0.5359398", "0.5338779", "0.5310546", "0.52928746", "0.5282921", "0.5279352", "0.5279001", "0.52591884", "0.521372", "0.52056307", "0.5136519", "0.5106076", "0.5093585", "0.5093585", "0.508893", "0.50791097", "0.507007", "0.5068071", "0.50492823", "0.5047607", "0.5041982", "0.5033806", "0.50300545", "0.50300545", "0.50220966", "0.50161654", "0.49987411", "0.4973591", "0.49686015", "0.4965938", "0.49597555", "0.4947962", "0.4918784", "0.4917084", "0.49052343", "0.49005905", "0.48973042", "0.48950753", "0.48905793", "0.48889422", "0.48816362", "0.48815823", "0.48786694", "0.48686954", "0.4864698", "0.48646566", "0.4858941", "0.48564273", "0.485455", "0.4843747", "0.48412818", "0.48370776", "0.48358908", "0.4833253", "0.48229158", "0.4806377", "0.4805858", "0.48033783", "0.48021865", "0.47964823", "0.47750932", "0.4768405", "0.4758163", "0.4757573", "0.47427362", "0.4735837", "0.4733725", "0.47297028", "0.47232515", "0.47209674", "0.47128278", "0.47117206", "0.47072574", "0.47022194", "0.46999288", "0.46977976", "0.46937203", "0.46901488", "0.4685285", "0.4685246", "0.46755475", "0.4670844", "0.46674314", "0.46657613", "0.4653522", "0.46523577", "0.46428913", "0.46363285", "0.46357659", "0.46331036", "0.46296275", "0.46278575", "0.4626488", "0.46094543", "0.45972398", "0.45966548", "0.45935205", "0.459188", "0.4590052" ]
0.0
-1
This operation is applicable only to general-purpose local-disk and dedicated local-disk instances. You can call this operation up to 30 times per minute. To call this operation at a higher frequency, use a Logstore. For more information, see [Manage a Logstore](~~48990~~).
def describe_running_log_records(
    self,
    request: dds_20151201_models.DescribeRunningLogRecordsRequest,
) -> dds_20151201_models.DescribeRunningLogRecordsResponse:
    runtime = util_models.RuntimeOptions()
    return self.describe_running_log_records_with_options(request, runtime)
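A minimal usage sketch for the synchronous wrapper above, under stated assumptions: the module paths (alibabacloud_dds20151201, alibabacloud_tea_openapi), the Client class, and the request fields dbinstance_id, start_time, and end_time are inferred from the surrounding SDK code and are not confirmed by this record.

from alibabacloud_dds20151201.client import Client
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_tea_openapi import models as open_api_models

# Build a client with placeholder credentials (Config field names are assumed).
config = open_api_models.Config(
    access_key_id='<access-key-id>',
    access_key_secret='<access-key-secret>',
    region_id='cn-hangzhou',
)
client = Client(config)

# Query running log records for one instance over a time window
# (parameter names are assumptions for illustration only).
request = dds_20151201_models.DescribeRunningLogRecordsRequest(
    dbinstance_id='dds-bp1xxxxxxxxxxxxx',
    start_time='2023-01-01T00:00Z',
    end_time='2023-01-01T01:00Z',
)
response = client.describe_running_log_records(request)
print(response.body)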
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def monitorStore():\n # commented to use psutil system info system_info = systeminfo.get_all_info()\n\n system_info = nodeinfo.node_all()\n system_info ['monitored_timestamp'] = config.get_current_system_timestamp()\n\n # Attach sliver info to system info\n system_info.update(sliverinfo.collectAllDataAPI())\n\n s = shelve.open('log_shelf.db', writeback = True)\n\n while(1):\n try:\n try:\n if s.has_key('current_seq_number'):\n #Update current sequence number\n s['current_seq_number']+= 1\n current_seq = s['current_seq_number']\n else:\n current_seq = 1\n s['current_seq_number']= current_seq\n\n print(\"writing to file: \" + str(current_seq))\n\n # print(\"writing to file\" + str(system_info))\n s[str(current_seq)]= system_info\n\n\n finally:\n s.close()\n break\n\n except OSError:\n # In some research devices, the underlying dbm has a bug which needs to be handled explicitly\n print(\"Exception caught while handling shelve file!! OS Error: file not found. Trying again in 1 second\")\n time.sleep(1)\n continue\n\n delete_seen_entries()", "def log(msg):\n\n print('datastore: %s' % msg)", "def disable_disk_logging():\r\n app.set_option(_DISK_LOG_LEVEL_OPTION, LogOptions._LOG_LEVEL_NONE_KEY, force=True)", "def store_stream(data: DataStream):\n if data:\n try:\n c1 = datetime.now()\n CC.save_datastream(data,\"datastream\")\n e1 = datetime.now()\n CC.save_datastream_to_influxdb(data)\n i1 = datetime.now()\n print(\"Cassandra Time: \", e1-c1, \" Influx Time: \",i1-e1, \" Batch size: \",len(data.data))\n except:\n cc_log()", "def test_Drive_log_count(opencl_env: cldrive_env.OpenCLEnvironment):\n logs = opencl_kernel_driver.Drive(\n \"\"\"\nkernel void A(global int* a, global int* b) {\n a[get_global_id(0)] += b[get_global_id(0)];\n}\n\"\"\",\n 16,\n 16,\n opencl_env,\n 5,\n )\n assert len(logs) == 5", "def disk():\n run(env.disk_usage_command % env)", "def record_used(kind, hash):\n if os.path.exists(LOG_FILEPATH):\n log = open(os.path.join(ROOT, 'used'), 'a')\n else:\n log = open(os.path.join(ROOT, 'used'), 'w+')\n\n log.writelines([\"%s...%s\\n\" % (kind, hash)])", "def syslog_local_file(handle, admin_state, name, severity=\"emergencies\",\n size=\"40000\"):\n\n from ucsmsdk.mometa.comm.CommSyslogFile import CommSyslogFile\n\n mo = CommSyslogFile(parent_mo_or_dn=\"sys/svc-ext/syslog\", size=size,\n admin_state=admin_state,\n name=name,\n severity=severity)\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n return mo", "def check_oplog(con, warning, critical, perf_data):\n warning = warning or 24 \n critical = critical or 4\n try:\n db = con.local\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) \n if (db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) != None) :\n oplog = \"oplog.rs\";\n else :\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"})\n if (db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"}) != None) :\n oplog = \"oplog.$main\";\n else :\n message = \"neither master/slave nor replica set replication detected\";\n return check_levels(None,warning,critical,message)\n\n try:\n set_read_preference(con.admin)\n data=con.local.command(pymongo.son_manipulator.SON([('collstats',oplog)]))\n except:\n data = con.admin.command(son.SON([('collstats',oplog)]))\n\n ol_size=data['size']\n ol_storage_size=data['storageSize']\n ol_used_storage=int(float(ol_size)/ol_storage_size*100+1)\n ol=con.local[oplog]\n firstc = ol.find().sort(\"$natural\",pymongo.ASCENDING).limit(1)[0]['ts']\n lastc = 
ol.find().sort(\"$natural\",pymongo.DESCENDING).limit(1)[0]['ts']\n time_in_oplog= (lastc.as_datetime()-firstc.as_datetime())\n message=\"Oplog saves \"+ str(time_in_oplog) + \" %d%% used\" %ol_used_storage \n try: #work starting from python2.7\n hours_in_oplog= time_in_oplog.total_seconds()/60/60\n except:\n hours_in_oplog= float(time_in_oplog.seconds + time_in_oplog.days * 24 * 3600)/60/60\n approx_level=hours_in_oplog*100/ol_used_storage\n message+=performance_data(perf_data,[(\"%.2f\" % hours_in_oplog,'oplog_time',warning,critical),(\"%.2f \" % approx_level, 'oplog_time_100_percent_used')])\n return check_levels(-approx_level,-warning,-critical,message)\n\n except Exception, e:\n return exit_with_general_critical(e)", "def disk_log_level():\r\n if LogOptions._DISK_LOG_LEVEL is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_LEVEL", "def get_full_log(self):\n return self._get_log('full')", "def _record_storage_event(metric, value=0):\n command_name = properties.VALUES.metrics.command_name.Get()\n metrics.CustomKeyValue(command_name, 'Storage-' + metric, value)", "def get_logdb_quota():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<show><system><logdb-quota></logdb-quota></system></show>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def test_memory_pressure(self):\n self.execute_query(self.query)\n # This triggers a full GC as of openjdk 1.8.\n call([\"jmap\", \"-histo:live\", str(self.cluster.catalogd.get_pid())])\n # Sleep for logbufsecs=5 seconds to wait for the log to be flushed. Wait 5 more\n # seconds to reduce flakiness.\n time.sleep(10)\n assert self.metadata_cache_string not in self._get_catalog_object()", "def getLogs():", "def getLogs():", "def InsertLog():", "def GetLogs(self):\n raise NotImplementedError()", "def logmem(cls, tag):\n\n cls.logger.info(\"----%s----\", str(tag))\n mem = psutil.virtual_memory()\n cls.logger.info(\"total:%s M\", mem.total / 1024 / 1024)\n cls.logger.info(\"available:%s M\", mem.available / 1024 / 1024)\n cls.logger.info(\"used:%s M\", mem.used / 1024 / 1024)\n cls.logger.info(\"free:%s M\", mem.free / 1024 / 1024)\n cls.logger.info(\"active:%s M\", mem.active / 1024 / 1024)\n cls.logger.info(\"inactive:%s M\", mem.inactive / 1024 / 1024)\n cls.logger.info(\"buffers:%s M\", mem.buffers / 1024 / 1024)\n cls.logger.info(\"cached:%s M\", mem.cached / 1024 / 1024)\n cls.logger.info(\"shared:%s M\", mem.shared / 1024 / 1024)", "def upload_crashes(self, name, directory):\n logging.info('Not uploading crashes because no Filestore.')", "def logs(lines=50):\n run(f'journalctl --lines {lines} --unit addok --follow')", "def log(self, message):\n try:\n stat = os.stat(self.logpath)\n if stat.st_size >= 1000000:\n os.rename(self.logpath, self.logpath + '.1')\n except:\n pass\n logfile = open(self.logpath, 'a+')\n logfile.write(message + \"\\n\")\n logfile.close()", "def logs(name, namespace, region, start_time, end_time, offset, failed, tail):\n\n start, end = _align_time(start_time, end_time, offset, tail)\n client = ScfLogClient(name, namespace, region, failed)\n client.fetch_log(start, end, tail)", "def objectLogger(params):\n mode = params['mode']\n maxlen = params['maxlen']\n file_format = params['format']\n home_dir = params['storage_dir']\n server = Listener((params['ip'], int(params['port'])))\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n while True:\n conn = server.accept()\n while True:\n try:\n data 
= conn.recv()\n except EOFError:\n break\n if data:\n storage.append(data)\n else:\n storage.sync(id_generator())\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n conn.close()", "def _load_disk(self):", "def _load_disk(self):", "def redire_spark_logs(bigdl_type=\"float\", log_path=os.getcwd() + \"/bigdl.log\"):\n callBigDlFunc(bigdl_type, \"redirectSparkLogs\", log_path)", "def get_device_log(self, client):\r\n file_path = client.getDeviceLog()\r\n logging.info(str(time.asctime(time.localtime())) + \" Device Logs collected !! \" + file_path)\r\n #self.logger.info(\"Device logs collected at %s\" % file_path)\r\n return file_path", "def logs(self, container: Container) -> str:", "def log_store(self) -> str:\n return pulumi.get(self, \"log_store\")", "def on_sync(self):\r\n self.log()", "def collect_storage_metrics(sys):\n try:\n session = get_session()\n client = InfluxDBClient(host=INFLUXDB_HOSTNAME, port=INFLUXDB_PORT, database=INFLUXDB_DATABASE)\n\n sys_id = sys[\"id\"]\n sys_name = get_system_name(sys)\n\n json_body = list()\n\n # Get Drive statistics\n drive_stats_list = session.get((\"{}/{}/analysed-drive-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n drive_locations = get_drive_location(sys_id, session)\n if CMD.showDriveNames:\n for stats in drive_stats_list:\n location_send = drive_locations.get(stats[\"diskId\"])\n LOG.info((\"Tray{:02.0f}, Slot{:03.0f}\").format(location_send[0], location_send[1]))\n # Add Drive statistics to json body\n for stats in drive_stats_list:\n disk_location_info = drive_locations.get(stats[\"diskId\"])\n disk_item = dict(\n measurement = \"disks\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n sys_tray = (\"{:02.0f}\").format(disk_location_info[0]),\n sys_tray_slot = (\"{:03.0f}\").format(disk_location_info[1])\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in DRIVE_PARAMS\n )\n )\n if CMD.showDriveMetrics:\n LOG.info(\"Drive payload: %s\", disk_item)\n json_body.append(disk_item)\n\n # Get interface statistics\n interface_stats_list = session.get((\"{}/{}/analysed-interface-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showInterfaceNames:\n for stats in interface_stats_list:\n LOG.info(stats[\"interfaceId\"])\n # Add interface statistics to json body\n for stats in interface_stats_list:\n if_item = dict(\n measurement = \"interface\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n interface_id = stats[\"interfaceId\"],\n channel_type = stats[\"channelType\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in INTERFACE_PARAMS\n )\n )\n if CMD.showInterfaceMetrics:\n LOG.info(\"Interface payload: %s\", if_item)\n json_body.append(if_item)\n\n # Get System statistics\n system_stats_list = session.get((\"{}/{}/analysed-system-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n # Add System statistics to json body\n sys_item = dict(\n measurement = \"systems\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name\n ),\n fields = dict(\n (metric, system_stats_list.get(metric)) for metric in SYSTEM_PARAMS\n )\n )\n if CMD.showSystemMetrics:\n LOG.info(\"System payload: %s\", sys_item)\n json_body.append(sys_item)\n \n # Get Volume statistics\n volume_stats_list = session.get((\"{}/{}/analysed-volume-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showVolumeNames:\n for stats in volume_stats_list:\n LOG.info(stats[\"volumeName\"])\n # Add Volume statistics to json body\n for stats in 
volume_stats_list:\n vol_item = dict(\n measurement = \"volumes\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n vol_name = stats[\"volumeName\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in VOLUME_PARAMS\n )\n )\n if CMD.showVolumeMetrics:\n LOG.info(\"Volume payload: %s\", vol_item)\n json_body.append(vol_item)\n\n if not CMD.doNotPost:\n client.write_points(json_body, database=INFLUXDB_DATABASE, time_precision=\"s\")\n\n except RuntimeError:\n LOG.error((\"Error when attempting to post statistics for {}/{}\").format(sys[\"name\"], sys[\"id\"]))", "def reset_used():\n with open(LOG_FILEPATH, 'w+') as logfile:\n pass", "def docker_logs():\n local('docker logs {} -f'.format(project_name))", "def test_slf_notastring():\n oldlogfile = get_logfile()\n start_logfile(1.0)\n start_logfile(True)\n set_logfile(oldlogfile)", "def set_disk_log_level(log_level):\r\n LogOptions._DISK_LOG_SCHEME, LogOptions._DISK_LOG_LEVEL = (\r\n LogOptions._parse_loglevel(log_level, scheme='google'))", "def test_slf_basic():\n oldlogfile = get_logfile()\n with tempfile.TemporaryDirectory() as tmp:\n logfile = os.path.join(tmp, 'log.txt')\n assert ~os.path.exists(logfile)\n start_logfile(logfile)\n assert os.path.exists(logfile)\n set_logfile(oldlogfile)", "def _performance_log(func):\n\n def wrapper(*arg):\n \"\"\" wrapper \"\"\"\n\n start = datetime.datetime.now()\n\n # Code execution\n res = func(*arg)\n\n if _log_performance:\n usage = resource.getrusage(resource.RUSAGE_SELF)\n memory_process = (usage[2])/1000\n\n delta = datetime.datetime.now() - start\n delta_milliseconds = int(delta.total_seconds() * 1000)\n\n _logger.info(\"PERFORMANCE - {0} - milliseconds |{1:>8,}| - memory MB |{2:>8,}|\"\n .format(func.__name__,\n delta_milliseconds,\n memory_process))\n\n return res\n\n return wrapper", "def log(length, file):\n\n if user_init.check_pre_init() and user_utility.check_drive_init() == 'True':\n \n data = user_utility.read_log(length, file)\n\n for log in data:\n print(log)\n\n\n else:\n user_utility.print_error(\"Sink folder not Found! 
Initialise folder first or reset your configuration!\")", "def logprllwrite(self):\n sql = '''select to_char(time_waited, 'FM99999999999999990') retvalue \n from v$system_event se, v$event_name en where se.event(+) \n = en.name and en.name = 'log files parallel write' '''\n self.cur.execute(sql)\n res = self.cur.fetchall()\n for i in res:\n print(i[0])", "def init_log():\n log_op = LogOp()\n log_op.config_log(logging.DEBUG)", "def logManager(self):\n time.sleep(0.1)\n while True:\n try:\n time.sleep(0.2)\n data = self.logQ.get(block=False)\n except Queue.Empty:\n pass\n else:\n try:\n self.log_lock.acquire() \n self.log_file.logEntry(data)\n time.sleep(0.1)\n self.log_lock.release()\n except:\n print '*Unable to write to log file*'", "def startLogFiles():\n #global techLog\n global userl\n _generic_startLogFiles(True)", "def _load_disk(self):\r\n pass", "def log_free_disk_space():\n cmd = 'df -h'\n p = Popen(cmd, shell=True, stdout=PIPE)\n res = p.communicate()\n if res[0]:\n res = res[0]\n else:\n res = res[1]\n logger.warning('Disk usage statisticks:')\n logger.warning(res)", "def test02StoreRefresh(self):\n s = utils.FastStore(max_size=5)\n keys = []\n for i in range(0, 5):\n keys.append(s.Put(i, i))\n\n # This should not raise because keys[0] should be refreshed each time its\n # gotten\n for i in range(0, 1000):\n s.Get(keys[0])\n s.Put(i, i)", "def log_time(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n start = time.time()\n result = func(*args, **kwargs)\n end = time.time()\n f = open(\"log_dev.txt\",'a',encoding=\"utf8\")\n time_res = end - start\n f.write(\"\\n\"+func.__name__+ \" time = \" + str(time_res))\n return result\n\n return wrapper", "def checkLogs():\n run(\"cat /etc/chariot/logs\")#not sure if this is cat-able", "def store(**kwargs):\r\n stored = AppLog(**kwargs)\r\n DBSession.add(stored)", "def test_compute_persistence(perfectModelEnsemble_initialized_control):\n perfectModelEnsemble_initialized_control._compute_persistence(metric=\"acc\")", "def collect_logs(self, log_dir, label=None, min_t=100, max_t=200):\n pass", "def test_backup_with_update_on_disk_of_snapshot_markers(self):\n version = RestConnection(self.backupset.backup_host).get_nodes_version()\n if version[:5] == \"6.5.0\":\n self.log.info(\"\\n\\n******* Due to issue in MB-36904, \\\n \\nthis test will be skipped in 6.5.0 ********\\n\")\n return\n gen1 = BlobGenerator(\"ent-backup1\", \"ent-backup-\", self.value_size, end=100000)\n gen2 = BlobGenerator(\"ent-backup2\", \"ent-backup-\", self.value_size, end=100000)\n gen3 = BlobGenerator(\"ent-backup3\", \"ent-backup-\", self.value_size, end=100000)\n rest_conn = RestConnection(self.backupset.cluster_host)\n rest_conn.create_bucket(bucket=\"bucket0\", ramQuotaMB=1024)\n self.buckets = rest_conn.get_buckets()\n authentication = \"-u Administrator -p password\"\n\n self._load_all_buckets(self.master, gen1, \"create\", 0)\n self.log.info(\"Stop persistent\")\n cluster_nodes = rest_conn.get_nodes()\n clusters = copy.deepcopy(cluster_nodes)\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n for node in clusters:\n shell.execute_command(\"%scbepctl%s %s:11210 -b %s stop %s\" % \\\n (self.cli_command_location,\n self.cmd_ext,\n node.ip,\n \"bucket0\",\n authentication))\n shell.disconnect()\n self.log.info(\"Load 2nd batch docs\")\n self._load_all_buckets(self.master, gen2, \"create\", 0)\n self.log.info(\"Run full backup with cbbackupwrapper\")\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n backup_dir = 
self.tmp_path + \"backup\" + self.master.ip\n shell.execute_command(\"rm -rf %s\" % backup_dir)\n shell.execute_command(\"mkdir %s\" % backup_dir)\n shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m full %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n self.log.info(\"Load 3rd batch docs\")\n self._load_all_buckets(self.master, gen3, \"create\", 0)\n self.log.info(\"Run diff backup with cbbackupwrapper\")\n output, _ = shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m diff %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n\n if output and \"SUCCESSFULLY COMPLETED\" not in output[1]:\n self.fail(\"Failed to backup as the fix in MB-25727\")\n shell.disconnect()", "def test_errors_on_log(self):\n mb = self.maria_backup\n self.assertFalse(mb.errors_on_log()) # at the moment, xtrabackup execution does not generate disk logs", "def add_log(self, log):\n log = str(datetime.datetime.now()) + \": \"+log+\"\\n\"\n print(log)\n self.logs.append(log)\n if len(self.logs) > 10:\n self.append_to_logfile()", "def log_scalar(name, value, step, autolog):\n if not autolog:\n mlflow.log_metric(name, value)", "def recordLogsToList(log):\n print log\n# global LOGLIST\n LOGLIST.append(log)", "def on_L1(self):\r\n self.log()", "def on_L2(self):\r\n self.log()", "def log_all(self):\n self.save_raw()\n self.log()", "def logs_directory(self):", "def test_log_links(self):\n self.host = synthetic_host(\"myserver-with-nids\", [Nid.Nid(\"192.168.0.1\", \"tcp\", 0)])\n self.create_simple_filesystem(self.host)\n fake_log_message(\"192.168.0.1@tcp testfs-MDT0000\")\n response = self.api_client.get(\"/api/log/\")\n (event,) = self.deserialize(response)[\"objects\"]\n self.assertEqual(len(event[\"substitutions\"]), 2)\n self.host.state = \"removed\"\n self.host.save()\n self.mdt.not_deleted = False\n self.mdt.save()\n response = self.api_client.get(\"/api/log/\")\n (event,) = self.deserialize(response)[\"objects\"]\n self.assertEqual(len(event[\"substitutions\"]), 0)", "def log(string):\n\n print string\n\n# data and time\n dt = datetime.now().strftime(\"%b %d %H:%M:%S\")\n\n# check if log file exist / if not create it\n check_logf = os.path.isfile(logfile)\n if check_logf == False:\n os.system(\"touch %s\" % (logfile))\n firstlog = \"%s %s jadm: jadm log file was created!\" % (dt, os.uname()[1])\n os.system(\"echo '%s' > %s\" % (firstlog, logfile))\n\n# applay string to log file\n string = \"%s %s jadm: %s\" % (dt, os.uname()[1], string)\n os.system(\"echo '%s' >> %s\" % (string, logfile))", "def test_data_store_local_index(self, tcex: TcEx):\n tcex.api.tc.v2.datastore('local', self.data_type)", "def createLogicalVolume(self, vg, filesystem, name, mountpoint, size):\n lv = {}\n lv['command'] = 'create:logvol'\n lv['vg'] = vg\n lv['fs'] = filesystem\n lv['size'] = size - EXTENT_SIZE + 1\n lv['name'] = name\n lv['mountPoint'] = mountpoint\n lv['format'] = 'yes'\n\n return lv", "def disk_log_scheme():\r\n if LogOptions._DISK_LOG_SCHEME is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_SCHEME", "def test_cbbackupmgr_collect_logs(self):\n if \"5.5\" > self.cb_version[:3]:\n self.fail(\"This test is only for cb version 5.5 and later. 
\")\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster()\n self._collect_logs()", "def log_runtime(label, mean_time, std, instances):\n pass", "def do_disk_event(client, args):\n args.type = 'disk'\n do_event_show(client, args)", "def show_bigdl_info_logs(bigdl_type=\"float\"):\n callBigDlFunc(bigdl_type, \"showBigDlInfoLogs\")", "def logs(self, **options):\n cmd = self.get_scalingo_prefix_cmd()\n cmd += \" logs -f\"\n for name, val in options.items():\n if len(name) == 1:\n cmd += f\" -{name}\"\n else:\n cmd += f\" --{name}\"\n if val:\n cmd += f\" {val}\"\n if self.env_name == \"local\":\n raise ValueError(\"No logs on local environment\")\n else:\n asyncio.run(self.async_run(cmd))", "def store_data(batch_df: DataFrame, output_delta_lake_path):\n batch_df.select(col(\"MeterId\"),\n col(\"SupplierId\"),\n col(\"Measurement\"),\n col(\"ObservationTime\")) \\\n .repartition(\"SupplierId\") \\\n .write \\\n .partitionBy(\"SupplierId\") \\\n .format(\"delta\") \\\n .mode(\"append\") \\\n .save(output_delta_lake_path)", "def logentry(jobid, label, typ, content=None, path=None):\n ud = str(uuid.uuid4())\n db.logs.save({\"uuid\":ud, \"jobid\":jobid, \"label\":label, \"type\":typ, \"content\":content, \"date\":tstamp(), \"containerurl\":path})", "def dump_to_log(self):\n # self._send_request(\"/dumpToLog\")\n pass", "def load(lb_id, backend='memory'):\n return b_api.fetch(backend).logbook_get(lb_id)", "def store(status, message, **kwargs):\r\n LogRecord(status, message, **kwargs)", "def log_mounts_info(mounts):\n if isinstance(mounts, str):\n mounts = [mounts]\n\n g.log.info(\"Start logging mounts information:\")\n for mount_obj in mounts:\n g.log.info(\"Information of mount %s:%s\", mount_obj.client_system,\n mount_obj.mountpoint)\n # Mount Info\n g.log.info(\"Look For Mountpoint:\\n\")\n cmd = \"mount | grep %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Disk Space Usage\n g.log.info(\"Disk Space Usage Of Mountpoint:\\n\")\n cmd = \"df -h %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Long list the mountpoint\n g.log.info(\"List Mountpoint Entries:\\n\")\n cmd = \"ls -ld %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Stat mountpoint\n g.log.info(\"Mountpoint Status:\\n\")\n cmd = \"stat %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)", "def putlog(self,s):\n if not self.logqueue == None:\n# print s\n self.logqueue.put(\"Spectrum: (\"+time.ctime()+\"):\\n\"+s)", "def set_size_based_rotating_log(log_path=LOGGER_PATH, log_name=LOGGER_FILENAME):\n\n # Checks if the path exists otherwise tries to create it\n if not os.path.exists(log_path):\n try:\n os.makedirs(log_path)\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n print(exc_type, exc_value, exc_traceback)\n\n # Sets the format and level of logging records\n format = '%(asctime)s - %(levelname)s - [%(pathname)s:%(lineno)s - %(funcName)10s() ] - %(message)s'\n formatter = Formatter(format)\n level=logging.DEBUG\n\n # Does basic configuration for the logging system\n logging.basicConfig(format=format, level=level)\n\n # Instantiates a logger object\n logger = logging.getLogger(\"\")\n\n handler = logging.handlers.RotatingFileHandler(filename=log_path+'/'+log_name,\n maxBytes=LOGGER_MAX_SIZE,\n backupCount=LOGGER_MAX_FILES)\n 
handler.setFormatter(formatter)\n handler.rotator = rotator\n handler.namer = namer\n logger.addHandler(handler)\n return logger", "def test_logging_log_rotate_for_mysql(\n self,\n admin_node,\n k8s_actions,\n show_step,\n os_deployed):\n logger.info('Log rotate for mysql')\n log_path = '/var/log/ccp/mysql/'\n log_file = 'mysql.log'\n\n show_step(1)\n # get cron pod\n cron_pod = [pod for pod in k8s_actions.api.pods.list(\n namespace=ext.Namespace.BASE_NAMESPACE)\n if 'cron-' in pod.name][0]\n # clean files\n utils.rm_files(admin_node, cron_pod, log_path + log_file + '*')\n\n show_step(2)\n for day in range(0, 8):\n utils.create_file(admin_node, cron_pod, log_path + log_file, 110)\n utils.run_daily_cron(admin_node, cron_pod, 'logrotate')\n sleep(5)\n\n show_step(3)\n log_files = utils.list_files(\n admin_node, cron_pod, log_path, log_file + '*')\n assert len(log_files) == 7,\\\n \"Count of log files after rotation is wrong. \" \\\n \"Expected {} Actual {}\".format(log_files, 7)", "def log(cmdDict, kind, error=None):\n\n cmdDict['timestamp'] = str(int(time.time() * 1000))\n cmdDict['logType'] = kind\n cmdDict['_id'] = cmdDict['transactionNumber'] + cmdDict['user'] + cmdDict['command'] + cmdDict['timestamp'] + cmdDict['logType']\n\n if (error != None):\n cmdDict['errorMessage'] = error\n\n try:\n Database.insert(TRANSACT_COLLECT, cmdDict)\n\n except pymongo.errors.PyMongoError as err:\n print(f\"ERROR! Could not log command into database. Failed with: {err}\")", "def upload_logs():\n return {\n 'page': 'upload_logs',\n 'raw_logs': '',\n }", "def logMemoryStats():\n class MemoryStatusEx(ctypes.Structure):\n \"\"\" MEMORYSTATUSEX \"\"\"\n kaFields = [\n ( 'dwLength', ctypes.c_ulong ),\n ( 'dwMemoryLoad', ctypes.c_ulong ),\n ( 'ullTotalPhys', ctypes.c_ulonglong ),\n ( 'ullAvailPhys', ctypes.c_ulonglong ),\n ( 'ullTotalPageFile', ctypes.c_ulonglong ),\n ( 'ullAvailPageFile', ctypes.c_ulonglong ),\n ( 'ullTotalVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailExtendedVirtual', ctypes.c_ulonglong ),\n ];\n _fields_ = kaFields; # pylint: disable=invalid-name\n\n def __init__(self):\n super(MemoryStatusEx, self).__init__();\n self.dwLength = ctypes.sizeof(self);\n\n try:\n oStats = MemoryStatusEx();\n ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(oStats));\n except:\n reporter.logXcpt();\n return False;\n\n reporter.log('Memory statistics:');\n for sField, _ in MemoryStatusEx.kaFields:\n reporter.log(' %32s: %s' % (sField, getattr(oStats, sField)));\n return True;", "def get_dismount_mount_record(r, start_time):\n retv = {'state': 'M',\n 'node': r['node'],\n 'volume': r['volume'],\n 'type': r['type'],\n 'logname': r['logname'],\n 'start': start_time,\n 'finish': timeutil.wind_time(start_time, seconds=20, backward=False),\n 'storage_group': 'PROBE_UTILITY',\n 'reads': 0,\n 'writes':0\n }\n return retv", "def azure_fetch_and_push_to_influx(token, azure_data_store, file_path, tag_map, influx_settings):\n df = read_df_from_azure(azure_data_store, file_path, token)\n (host, port, user, password, db_name, batch_size) = influx_settings\n #df=df.resample('1Min').mean()\n write_to_influx(df, tag_map, host, port, user, password, db_name, batch_size)", "def test_data_store_local_new_index(tcex: TcEx):\n data = {'one': 1}\n key = str(uuid.uuid4())\n rid = 'one'\n\n ds = tcex.api.tc.v2.datastore('local', key)\n\n results = ds.add(rid=rid, data=data)\n if results is None:\n assert False, 'datastore add returned None'\n assert results.get('_id') == rid\n assert 
results.get('_shards', {}).get('successful') == 1", "def test_metering_database(self):\n self.with_deletion = self.input.param(\"delete\", False)\n self.db_name = \"%s-testmetering\" % self.db_name\n # validate initial throughput is 5000/3 = 1666\n for bucket in self.cluster.buckets:\n print(bucket.servers)\n self.assertEqual(self.bucket_util.get_throttle_limit(bucket),\n self.bucket_throttling_limit)\n\n # validate create, update, delete stat\n for op_type in [\"create\", \"update\"]:\n if op_type == \"create\":\n self.load_data(create_start=0, create_end=self.num_items, create_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if op_type == \"update\":\n self.load_data(update_start=0, update_end=self.num_items, update_perc=100, mutated=1)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if self.with_deletion:\n self.log.info(\"performing delete operation\")\n self.load_data(delete_start=0, delete_end=self.num_items, delete_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)", "def on_L3(self):\r\n self.log()", "def send_logs(self):\n for i in range(30):\n with open('{}-{}.log'.format(self._log_file_path, i), 'a') as log_file:\n for _ in range(self._log_rate):\n log_file.write(self._log_record + '\\n')", "def fetch_data_logger(fn):\n @functools.wraps(fn)\n def wrapper():\n\n data = fn()\n\n create_or_update_log(data, LOG_TYPES['D'])\n\n return data\n return wrapper", "def list_log_files(fs, devices, start_times, verbose=True, passwords={}):\n import canedge_browser\n\n log_files = []\n\n if len(start_times):\n for idx, device in enumerate(devices):\n start = start_times[idx]\n log_files_device = canedge_browser.get_log_files(fs, [device], start_date=start, passwords=passwords)\n log_files.extend(log_files_device)\n\n if verbose:\n print(f\"Found {len(log_files)} log files\\n\")\n\n return log_files", "def main():\n global tar_file_descr\n\n help_msg = 'Usage: log_collector.py <all | host1[,host2,host3...]>'\n hosts = []\n if len(sys.argv) == 2:\n if '-h' == sys.argv[1] or '--help' == sys.argv[1]:\n print(help_msg)\n sys.exit(0)\n elif 'all' == sys.argv[1]:\n # get logs from all hosts\n hosts = []\n host_objs = CLIENT.host_get_all()\n for host_obj in host_objs:\n hosts.append(host_obj.name)\n else:\n # get logs from specified hosts\n hostnames = sys.argv[1].split(',')\n for host in hostnames:\n if host not in hosts:\n hosts.append(host)\n else:\n print(help_msg)\n sys.exit(1)\n\n # open tar file for storing logs\n fd, tar_path = tempfile.mkstemp(prefix='kolla_support_logs_',\n suffix='.tgz')\n os.close(fd) # avoid fd leak\n\n with tarfile.open(tar_path, 'w:gz') as tar_file_descr:\n # clear out old logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n os.mkdir(LOGDIR)\n\n # gather logs from selected hosts\n try:\n for host in hosts:\n get_logs_from_host(host)\n\n # tar up all the container logs\n tar_file_descr.add(LOGDIR, arcname='container_logs')\n finally:\n # remove uncompressed logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n\n # gather dump output from kolla-cli\n dump_kolla_info()\n\n print('Log collection complete. 
Logs are at %s' % tar_path)", "def log_store(self) -> Optional[str]:\n return pulumi.get(self, \"log_store\")", "def __enable_log_streaming():\n # Enable BigQuery service for the project:\n run_command('gcloud services enable bigquery --project {}'.format(PROJECT_ID))\n\n # Create the BigQuery dataset to store the logs:\n # For details refer to https://cloud.google.com/bigquery/docs/datasets#bigquery-create-dataset-cli\n run_command('bq mk --data_location {} --description \\\"Cloud logging export.\\\" {}'\n .format(LOGS_LOCATION, LOGS_SINK_DATASET_ID), 'already exists')\n\n # Set up a log sink to the above-created BigQuery dataset:\n output_message = run_command('gcloud logging sinks create {} {} --project {} \\\n --log-filter=\\'resource.type=\\\"gcs_bucket\\\" OR resource.type=\\\"project\\\"\\''\n .format(LOGS_SINK_NAME, LOGS_SINK_DESTINATION, PROJECT_ID))\n\n # The service account that will be writing to BQ dataset is listed by the previous command.\n # After extracting the service account from the message, you need to give it BQ Writer role to that service account.\n __set_dataset_access( __find_service_account_in_message(output_message) )", "def get_storage_info(self, address: str, level: StorageLevel):", "def local(self, text):\n\t\tlogf = open(\"update_log.txt\", \"a\")\n\t\tdate = datetime.datetime.now()\n\t\tlogf.writelines(\"[\" + str(date) + \"] \" + text + \"\\n\")\n\t\tlogf.close()", "def log_it(logdata=None):\n with open(\"bloomhack.log\", \"a\") as fp:\n fp.write(logdata)\n return", "def node_storage(self, node):\n def listdir(path, only_dirs=False):\n ents = node.account.sftp_client.listdir(path)\n if not only_dirs:\n return ents\n paths = map(lambda fn: (fn, os.path.join(path, fn)), ents)\n return [p[0] for p in paths if node.account.isdir(p[1])]\n\n store = NodeStorage(RedpandaService.DATA_DIR)\n for ns in listdir(store.data_dir, True):\n if ns == '.coprocessor_offset_checkpoints':\n continue\n ns = store.add_namespace(ns, os.path.join(store.data_dir, ns))\n for topic in listdir(ns.path):\n topic = ns.add_topic(topic, os.path.join(ns.path, topic))\n for num in listdir(topic.path):\n partition = topic.add_partition(\n num, node, os.path.join(topic.path, num))\n partition.add_files(listdir(partition.path))\n return store", "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))", "def snapshot(snapshot_type, result_q, time_delta):", "def _log_process(ip_or_url,\n username,\n password,\n database,\n series_name,\n port=8086,\n polling_interval=1,\n retention_duration=MEASUREMENTS_RETENTION_DURATION,\n **tags):\n logger = _logger()\n logger.info('Logging GPU to Database {}'.format(ip_or_url))\n\n kwargs = {'series_name': series_name,\n 'polling_interval': polling_interval,\n 'retention_duration': retention_duration}\n kwargs.update(tags)\n p = Process(target=_start_logger_process,\n args=(ip_or_url,\n port,\n username,\n password,\n database),\n kwargs=kwargs)\n p.start()\n yield p\n p.terminate()\n p.join()", "def test_start_new_log(self):\n old_log_file_name = self.device.log_file_name\n 
self.device.start_new_log(log_name_prefix=self.get_log_suffix())\n self.assertNotEqual(\n old_log_file_name, self.device.log_file_name,\n f\"Expected log file name to change from {old_log_file_name}\")\n self.assertTrue(\n os.path.exists(old_log_file_name),\n f\"Expected old log file name {old_log_file_name} to exist\")\n self.assertTrue(\n os.path.exists(self.device.log_file_name),\n f\"Expected new log file name {self.device.log_file_name} to exist\")" ]
[ "0.6248957", "0.6004453", "0.53603053", "0.53381205", "0.5311462", "0.5291663", "0.5282635", "0.5279799", "0.52787983", "0.5260151", "0.52150124", "0.5205798", "0.5137791", "0.5104536", "0.5094829", "0.5094829", "0.5089645", "0.5080881", "0.5069531", "0.5067983", "0.5049315", "0.5047344", "0.50428", "0.50336874", "0.5028792", "0.5028792", "0.50232273", "0.5017229", "0.50001866", "0.4975056", "0.49695352", "0.49651346", "0.4959732", "0.49485096", "0.49201605", "0.491775", "0.4906063", "0.4901353", "0.48973086", "0.48958024", "0.48914644", "0.48892727", "0.48823112", "0.48804468", "0.4877903", "0.48676512", "0.48655146", "0.48638356", "0.48596066", "0.48569125", "0.48556426", "0.48419136", "0.48412684", "0.48380032", "0.48372665", "0.48348084", "0.48249245", "0.48086706", "0.4807052", "0.4804408", "0.48027894", "0.47964686", "0.47765866", "0.4768357", "0.475868", "0.47576597", "0.474328", "0.4735613", "0.47350103", "0.47318107", "0.47237858", "0.47210956", "0.47131303", "0.47130087", "0.47083464", "0.47016108", "0.47003374", "0.4697505", "0.46934873", "0.46901193", "0.4685906", "0.46849328", "0.46755195", "0.46703953", "0.46671495", "0.4667117", "0.46545303", "0.46534565", "0.46448874", "0.46371803", "0.46359396", "0.4634748", "0.4631148", "0.46283913", "0.4627094", "0.46101946", "0.45972055", "0.45956996", "0.45937267", "0.45925382", "0.45902616" ]
0.0
-1
This operation is applicable only to general-purpose local-disk and dedicated local-disk instances. You can call this operation up to 30 times per minute. To call this operation at a higher frequency, use a Logstore. For more information, see [Manage a Logstore](~~48990~~).
async def describe_running_log_records_async(
    self,
    request: dds_20151201_models.DescribeRunningLogRecordsRequest,
) -> dds_20151201_models.DescribeRunningLogRecordsResponse:
    runtime = util_models.RuntimeOptions()
    return await self.describe_running_log_records_with_options_async(request, runtime)
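A hedged sketch of calling the async variant; it assumes the same client construction and request model as the synchronous example above and simply awaits the async wrapper inside an asyncio event loop.

import asyncio

async def fetch_running_logs(client, instance_id: str):
    # Parameter names remain assumptions for illustration.
    request = dds_20151201_models.DescribeRunningLogRecordsRequest(
        dbinstance_id=instance_id,
        start_time='2023-01-01T00:00Z',
        end_time='2023-01-01T01:00Z',
    )
    # Delegates to describe_running_log_records_with_options_async under the hood.
    return await client.describe_running_log_records_async(request)

# Example invocation (client built as in the synchronous sketch):
# asyncio.run(fetch_running_logs(client, 'dds-bp1xxxxxxxxxxxxx'))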
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def monitorStore():\n # commented to use psutil system info system_info = systeminfo.get_all_info()\n\n system_info = nodeinfo.node_all()\n system_info ['monitored_timestamp'] = config.get_current_system_timestamp()\n\n # Attach sliver info to system info\n system_info.update(sliverinfo.collectAllDataAPI())\n\n s = shelve.open('log_shelf.db', writeback = True)\n\n while(1):\n try:\n try:\n if s.has_key('current_seq_number'):\n #Update current sequence number\n s['current_seq_number']+= 1\n current_seq = s['current_seq_number']\n else:\n current_seq = 1\n s['current_seq_number']= current_seq\n\n print(\"writing to file: \" + str(current_seq))\n\n # print(\"writing to file\" + str(system_info))\n s[str(current_seq)]= system_info\n\n\n finally:\n s.close()\n break\n\n except OSError:\n # In some research devices, the underlying dbm has a bug which needs to be handled explicitly\n print(\"Exception caught while handling shelve file!! OS Error: file not found. Trying again in 1 second\")\n time.sleep(1)\n continue\n\n delete_seen_entries()", "def log(msg):\n\n print('datastore: %s' % msg)", "def disable_disk_logging():\r\n app.set_option(_DISK_LOG_LEVEL_OPTION, LogOptions._LOG_LEVEL_NONE_KEY, force=True)", "def store_stream(data: DataStream):\n if data:\n try:\n c1 = datetime.now()\n CC.save_datastream(data,\"datastream\")\n e1 = datetime.now()\n CC.save_datastream_to_influxdb(data)\n i1 = datetime.now()\n print(\"Cassandra Time: \", e1-c1, \" Influx Time: \",i1-e1, \" Batch size: \",len(data.data))\n except:\n cc_log()", "def test_Drive_log_count(opencl_env: cldrive_env.OpenCLEnvironment):\n logs = opencl_kernel_driver.Drive(\n \"\"\"\nkernel void A(global int* a, global int* b) {\n a[get_global_id(0)] += b[get_global_id(0)];\n}\n\"\"\",\n 16,\n 16,\n opencl_env,\n 5,\n )\n assert len(logs) == 5", "def disk():\n run(env.disk_usage_command % env)", "def record_used(kind, hash):\n if os.path.exists(LOG_FILEPATH):\n log = open(os.path.join(ROOT, 'used'), 'a')\n else:\n log = open(os.path.join(ROOT, 'used'), 'w+')\n\n log.writelines([\"%s...%s\\n\" % (kind, hash)])", "def check_oplog(con, warning, critical, perf_data):\n warning = warning or 24 \n critical = critical or 4\n try:\n db = con.local\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) \n if (db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) != None) :\n oplog = \"oplog.rs\";\n else :\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"})\n if (db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"}) != None) :\n oplog = \"oplog.$main\";\n else :\n message = \"neither master/slave nor replica set replication detected\";\n return check_levels(None,warning,critical,message)\n\n try:\n set_read_preference(con.admin)\n data=con.local.command(pymongo.son_manipulator.SON([('collstats',oplog)]))\n except:\n data = con.admin.command(son.SON([('collstats',oplog)]))\n\n ol_size=data['size']\n ol_storage_size=data['storageSize']\n ol_used_storage=int(float(ol_size)/ol_storage_size*100+1)\n ol=con.local[oplog]\n firstc = ol.find().sort(\"$natural\",pymongo.ASCENDING).limit(1)[0]['ts']\n lastc = ol.find().sort(\"$natural\",pymongo.DESCENDING).limit(1)[0]['ts']\n time_in_oplog= (lastc.as_datetime()-firstc.as_datetime())\n message=\"Oplog saves \"+ str(time_in_oplog) + \" %d%% used\" %ol_used_storage \n try: #work starting from python2.7\n hours_in_oplog= time_in_oplog.total_seconds()/60/60\n except:\n hours_in_oplog= float(time_in_oplog.seconds + time_in_oplog.days * 24 * 3600)/60/60\n 
approx_level=hours_in_oplog*100/ol_used_storage\n message+=performance_data(perf_data,[(\"%.2f\" % hours_in_oplog,'oplog_time',warning,critical),(\"%.2f \" % approx_level, 'oplog_time_100_percent_used')])\n return check_levels(-approx_level,-warning,-critical,message)\n\n except Exception, e:\n return exit_with_general_critical(e)", "def syslog_local_file(handle, admin_state, name, severity=\"emergencies\",\n size=\"40000\"):\n\n from ucsmsdk.mometa.comm.CommSyslogFile import CommSyslogFile\n\n mo = CommSyslogFile(parent_mo_or_dn=\"sys/svc-ext/syslog\", size=size,\n admin_state=admin_state,\n name=name,\n severity=severity)\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n return mo", "def disk_log_level():\r\n if LogOptions._DISK_LOG_LEVEL is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_LEVEL", "def get_full_log(self):\n return self._get_log('full')", "def _record_storage_event(metric, value=0):\n command_name = properties.VALUES.metrics.command_name.Get()\n metrics.CustomKeyValue(command_name, 'Storage-' + metric, value)", "def get_logdb_quota():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<show><system><logdb-quota></logdb-quota></system></show>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def test_memory_pressure(self):\n self.execute_query(self.query)\n # This triggers a full GC as of openjdk 1.8.\n call([\"jmap\", \"-histo:live\", str(self.cluster.catalogd.get_pid())])\n # Sleep for logbufsecs=5 seconds to wait for the log to be flushed. Wait 5 more\n # seconds to reduce flakiness.\n time.sleep(10)\n assert self.metadata_cache_string not in self._get_catalog_object()", "def getLogs():", "def getLogs():", "def InsertLog():", "def GetLogs(self):\n raise NotImplementedError()", "def logmem(cls, tag):\n\n cls.logger.info(\"----%s----\", str(tag))\n mem = psutil.virtual_memory()\n cls.logger.info(\"total:%s M\", mem.total / 1024 / 1024)\n cls.logger.info(\"available:%s M\", mem.available / 1024 / 1024)\n cls.logger.info(\"used:%s M\", mem.used / 1024 / 1024)\n cls.logger.info(\"free:%s M\", mem.free / 1024 / 1024)\n cls.logger.info(\"active:%s M\", mem.active / 1024 / 1024)\n cls.logger.info(\"inactive:%s M\", mem.inactive / 1024 / 1024)\n cls.logger.info(\"buffers:%s M\", mem.buffers / 1024 / 1024)\n cls.logger.info(\"cached:%s M\", mem.cached / 1024 / 1024)\n cls.logger.info(\"shared:%s M\", mem.shared / 1024 / 1024)", "def upload_crashes(self, name, directory):\n logging.info('Not uploading crashes because no Filestore.')", "def logs(lines=50):\n run(f'journalctl --lines {lines} --unit addok --follow')", "def log(self, message):\n try:\n stat = os.stat(self.logpath)\n if stat.st_size >= 1000000:\n os.rename(self.logpath, self.logpath + '.1')\n except:\n pass\n logfile = open(self.logpath, 'a+')\n logfile.write(message + \"\\n\")\n logfile.close()", "def logs(name, namespace, region, start_time, end_time, offset, failed, tail):\n\n start, end = _align_time(start_time, end_time, offset, tail)\n client = ScfLogClient(name, namespace, region, failed)\n client.fetch_log(start, end, tail)", "def objectLogger(params):\n mode = params['mode']\n maxlen = params['maxlen']\n file_format = params['format']\n home_dir = params['storage_dir']\n server = Listener((params['ip'], int(params['port'])))\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n while True:\n conn = server.accept()\n while True:\n try:\n data = conn.recv()\n except 
EOFError:\n break\n if data:\n storage.append(data)\n else:\n storage.sync(id_generator())\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n conn.close()", "def _load_disk(self):", "def _load_disk(self):", "def redire_spark_logs(bigdl_type=\"float\", log_path=os.getcwd() + \"/bigdl.log\"):\n callBigDlFunc(bigdl_type, \"redirectSparkLogs\", log_path)", "def get_device_log(self, client):\r\n file_path = client.getDeviceLog()\r\n logging.info(str(time.asctime(time.localtime())) + \" Device Logs collected !! \" + file_path)\r\n #self.logger.info(\"Device logs collected at %s\" % file_path)\r\n return file_path", "def logs(self, container: Container) -> str:", "def log_store(self) -> str:\n return pulumi.get(self, \"log_store\")", "def on_sync(self):\r\n self.log()", "def collect_storage_metrics(sys):\n try:\n session = get_session()\n client = InfluxDBClient(host=INFLUXDB_HOSTNAME, port=INFLUXDB_PORT, database=INFLUXDB_DATABASE)\n\n sys_id = sys[\"id\"]\n sys_name = get_system_name(sys)\n\n json_body = list()\n\n # Get Drive statistics\n drive_stats_list = session.get((\"{}/{}/analysed-drive-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n drive_locations = get_drive_location(sys_id, session)\n if CMD.showDriveNames:\n for stats in drive_stats_list:\n location_send = drive_locations.get(stats[\"diskId\"])\n LOG.info((\"Tray{:02.0f}, Slot{:03.0f}\").format(location_send[0], location_send[1]))\n # Add Drive statistics to json body\n for stats in drive_stats_list:\n disk_location_info = drive_locations.get(stats[\"diskId\"])\n disk_item = dict(\n measurement = \"disks\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n sys_tray = (\"{:02.0f}\").format(disk_location_info[0]),\n sys_tray_slot = (\"{:03.0f}\").format(disk_location_info[1])\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in DRIVE_PARAMS\n )\n )\n if CMD.showDriveMetrics:\n LOG.info(\"Drive payload: %s\", disk_item)\n json_body.append(disk_item)\n\n # Get interface statistics\n interface_stats_list = session.get((\"{}/{}/analysed-interface-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showInterfaceNames:\n for stats in interface_stats_list:\n LOG.info(stats[\"interfaceId\"])\n # Add interface statistics to json body\n for stats in interface_stats_list:\n if_item = dict(\n measurement = \"interface\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n interface_id = stats[\"interfaceId\"],\n channel_type = stats[\"channelType\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in INTERFACE_PARAMS\n )\n )\n if CMD.showInterfaceMetrics:\n LOG.info(\"Interface payload: %s\", if_item)\n json_body.append(if_item)\n\n # Get System statistics\n system_stats_list = session.get((\"{}/{}/analysed-system-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n # Add System statistics to json body\n sys_item = dict(\n measurement = \"systems\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name\n ),\n fields = dict(\n (metric, system_stats_list.get(metric)) for metric in SYSTEM_PARAMS\n )\n )\n if CMD.showSystemMetrics:\n LOG.info(\"System payload: %s\", sys_item)\n json_body.append(sys_item)\n \n # Get Volume statistics\n volume_stats_list = session.get((\"{}/{}/analysed-volume-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showVolumeNames:\n for stats in volume_stats_list:\n LOG.info(stats[\"volumeName\"])\n # Add Volume statistics to json body\n for stats in volume_stats_list:\n 
vol_item = dict(\n measurement = \"volumes\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n vol_name = stats[\"volumeName\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in VOLUME_PARAMS\n )\n )\n if CMD.showVolumeMetrics:\n LOG.info(\"Volume payload: %s\", vol_item)\n json_body.append(vol_item)\n\n if not CMD.doNotPost:\n client.write_points(json_body, database=INFLUXDB_DATABASE, time_precision=\"s\")\n\n except RuntimeError:\n LOG.error((\"Error when attempting to post statistics for {}/{}\").format(sys[\"name\"], sys[\"id\"]))", "def reset_used():\n with open(LOG_FILEPATH, 'w+') as logfile:\n pass", "def docker_logs():\n local('docker logs {} -f'.format(project_name))", "def test_slf_notastring():\n oldlogfile = get_logfile()\n start_logfile(1.0)\n start_logfile(True)\n set_logfile(oldlogfile)", "def set_disk_log_level(log_level):\r\n LogOptions._DISK_LOG_SCHEME, LogOptions._DISK_LOG_LEVEL = (\r\n LogOptions._parse_loglevel(log_level, scheme='google'))", "def test_slf_basic():\n oldlogfile = get_logfile()\n with tempfile.TemporaryDirectory() as tmp:\n logfile = os.path.join(tmp, 'log.txt')\n assert ~os.path.exists(logfile)\n start_logfile(logfile)\n assert os.path.exists(logfile)\n set_logfile(oldlogfile)", "def _performance_log(func):\n\n def wrapper(*arg):\n \"\"\" wrapper \"\"\"\n\n start = datetime.datetime.now()\n\n # Code execution\n res = func(*arg)\n\n if _log_performance:\n usage = resource.getrusage(resource.RUSAGE_SELF)\n memory_process = (usage[2])/1000\n\n delta = datetime.datetime.now() - start\n delta_milliseconds = int(delta.total_seconds() * 1000)\n\n _logger.info(\"PERFORMANCE - {0} - milliseconds |{1:>8,}| - memory MB |{2:>8,}|\"\n .format(func.__name__,\n delta_milliseconds,\n memory_process))\n\n return res\n\n return wrapper", "def log(length, file):\n\n if user_init.check_pre_init() and user_utility.check_drive_init() == 'True':\n \n data = user_utility.read_log(length, file)\n\n for log in data:\n print(log)\n\n\n else:\n user_utility.print_error(\"Sink folder not Found! 
Initialise folder first or reset your configuration!\")", "def logprllwrite(self):\n sql = '''select to_char(time_waited, 'FM99999999999999990') retvalue \n from v$system_event se, v$event_name en where se.event(+) \n = en.name and en.name = 'log files parallel write' '''\n self.cur.execute(sql)\n res = self.cur.fetchall()\n for i in res:\n print(i[0])", "def init_log():\n log_op = LogOp()\n log_op.config_log(logging.DEBUG)", "def logManager(self):\n time.sleep(0.1)\n while True:\n try:\n time.sleep(0.2)\n data = self.logQ.get(block=False)\n except Queue.Empty:\n pass\n else:\n try:\n self.log_lock.acquire() \n self.log_file.logEntry(data)\n time.sleep(0.1)\n self.log_lock.release()\n except:\n print '*Unable to write to log file*'", "def startLogFiles():\n #global techLog\n global userl\n _generic_startLogFiles(True)", "def _load_disk(self):\r\n pass", "def log_free_disk_space():\n cmd = 'df -h'\n p = Popen(cmd, shell=True, stdout=PIPE)\n res = p.communicate()\n if res[0]:\n res = res[0]\n else:\n res = res[1]\n logger.warning('Disk usage statisticks:')\n logger.warning(res)", "def test02StoreRefresh(self):\n s = utils.FastStore(max_size=5)\n keys = []\n for i in range(0, 5):\n keys.append(s.Put(i, i))\n\n # This should not raise because keys[0] should be refreshed each time its\n # gotten\n for i in range(0, 1000):\n s.Get(keys[0])\n s.Put(i, i)", "def log_time(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n start = time.time()\n result = func(*args, **kwargs)\n end = time.time()\n f = open(\"log_dev.txt\",'a',encoding=\"utf8\")\n time_res = end - start\n f.write(\"\\n\"+func.__name__+ \" time = \" + str(time_res))\n return result\n\n return wrapper", "def checkLogs():\n run(\"cat /etc/chariot/logs\")#not sure if this is cat-able", "def store(**kwargs):\r\n stored = AppLog(**kwargs)\r\n DBSession.add(stored)", "def test_compute_persistence(perfectModelEnsemble_initialized_control):\n perfectModelEnsemble_initialized_control._compute_persistence(metric=\"acc\")", "def collect_logs(self, log_dir, label=None, min_t=100, max_t=200):\n pass", "def test_backup_with_update_on_disk_of_snapshot_markers(self):\n version = RestConnection(self.backupset.backup_host).get_nodes_version()\n if version[:5] == \"6.5.0\":\n self.log.info(\"\\n\\n******* Due to issue in MB-36904, \\\n \\nthis test will be skipped in 6.5.0 ********\\n\")\n return\n gen1 = BlobGenerator(\"ent-backup1\", \"ent-backup-\", self.value_size, end=100000)\n gen2 = BlobGenerator(\"ent-backup2\", \"ent-backup-\", self.value_size, end=100000)\n gen3 = BlobGenerator(\"ent-backup3\", \"ent-backup-\", self.value_size, end=100000)\n rest_conn = RestConnection(self.backupset.cluster_host)\n rest_conn.create_bucket(bucket=\"bucket0\", ramQuotaMB=1024)\n self.buckets = rest_conn.get_buckets()\n authentication = \"-u Administrator -p password\"\n\n self._load_all_buckets(self.master, gen1, \"create\", 0)\n self.log.info(\"Stop persistent\")\n cluster_nodes = rest_conn.get_nodes()\n clusters = copy.deepcopy(cluster_nodes)\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n for node in clusters:\n shell.execute_command(\"%scbepctl%s %s:11210 -b %s stop %s\" % \\\n (self.cli_command_location,\n self.cmd_ext,\n node.ip,\n \"bucket0\",\n authentication))\n shell.disconnect()\n self.log.info(\"Load 2nd batch docs\")\n self._load_all_buckets(self.master, gen2, \"create\", 0)\n self.log.info(\"Run full backup with cbbackupwrapper\")\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n backup_dir = 
self.tmp_path + \"backup\" + self.master.ip\n shell.execute_command(\"rm -rf %s\" % backup_dir)\n shell.execute_command(\"mkdir %s\" % backup_dir)\n shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m full %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n self.log.info(\"Load 3rd batch docs\")\n self._load_all_buckets(self.master, gen3, \"create\", 0)\n self.log.info(\"Run diff backup with cbbackupwrapper\")\n output, _ = shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m diff %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n\n if output and \"SUCCESSFULLY COMPLETED\" not in output[1]:\n self.fail(\"Failed to backup as the fix in MB-25727\")\n shell.disconnect()", "def test_errors_on_log(self):\n mb = self.maria_backup\n self.assertFalse(mb.errors_on_log()) # at the moment, xtrabackup execution does not generate disk logs", "def add_log(self, log):\n log = str(datetime.datetime.now()) + \": \"+log+\"\\n\"\n print(log)\n self.logs.append(log)\n if len(self.logs) > 10:\n self.append_to_logfile()", "def log_scalar(name, value, step, autolog):\n if not autolog:\n mlflow.log_metric(name, value)", "def recordLogsToList(log):\n print log\n# global LOGLIST\n LOGLIST.append(log)", "def on_L1(self):\r\n self.log()", "def on_L2(self):\r\n self.log()", "def log_all(self):\n self.save_raw()\n self.log()", "def logs_directory(self):", "def test_log_links(self):\n self.host = synthetic_host(\"myserver-with-nids\", [Nid.Nid(\"192.168.0.1\", \"tcp\", 0)])\n self.create_simple_filesystem(self.host)\n fake_log_message(\"192.168.0.1@tcp testfs-MDT0000\")\n response = self.api_client.get(\"/api/log/\")\n (event,) = self.deserialize(response)[\"objects\"]\n self.assertEqual(len(event[\"substitutions\"]), 2)\n self.host.state = \"removed\"\n self.host.save()\n self.mdt.not_deleted = False\n self.mdt.save()\n response = self.api_client.get(\"/api/log/\")\n (event,) = self.deserialize(response)[\"objects\"]\n self.assertEqual(len(event[\"substitutions\"]), 0)", "def log(string):\n\n print string\n\n# data and time\n dt = datetime.now().strftime(\"%b %d %H:%M:%S\")\n\n# check if log file exist / if not create it\n check_logf = os.path.isfile(logfile)\n if check_logf == False:\n os.system(\"touch %s\" % (logfile))\n firstlog = \"%s %s jadm: jadm log file was created!\" % (dt, os.uname()[1])\n os.system(\"echo '%s' > %s\" % (firstlog, logfile))\n\n# applay string to log file\n string = \"%s %s jadm: %s\" % (dt, os.uname()[1], string)\n os.system(\"echo '%s' >> %s\" % (string, logfile))", "def test_data_store_local_index(self, tcex: TcEx):\n tcex.api.tc.v2.datastore('local', self.data_type)", "def createLogicalVolume(self, vg, filesystem, name, mountpoint, size):\n lv = {}\n lv['command'] = 'create:logvol'\n lv['vg'] = vg\n lv['fs'] = filesystem\n lv['size'] = size - EXTENT_SIZE + 1\n lv['name'] = name\n lv['mountPoint'] = mountpoint\n lv['format'] = 'yes'\n\n return lv", "def disk_log_scheme():\r\n if LogOptions._DISK_LOG_SCHEME is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_SCHEME", "def test_cbbackupmgr_collect_logs(self):\n if \"5.5\" > self.cb_version[:3]:\n self.fail(\"This test is only for cb version 5.5 and later. 
\")\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster()\n self._collect_logs()", "def log_runtime(label, mean_time, std, instances):\n pass", "def do_disk_event(client, args):\n args.type = 'disk'\n do_event_show(client, args)", "def show_bigdl_info_logs(bigdl_type=\"float\"):\n callBigDlFunc(bigdl_type, \"showBigDlInfoLogs\")", "def logs(self, **options):\n cmd = self.get_scalingo_prefix_cmd()\n cmd += \" logs -f\"\n for name, val in options.items():\n if len(name) == 1:\n cmd += f\" -{name}\"\n else:\n cmd += f\" --{name}\"\n if val:\n cmd += f\" {val}\"\n if self.env_name == \"local\":\n raise ValueError(\"No logs on local environment\")\n else:\n asyncio.run(self.async_run(cmd))", "def store_data(batch_df: DataFrame, output_delta_lake_path):\n batch_df.select(col(\"MeterId\"),\n col(\"SupplierId\"),\n col(\"Measurement\"),\n col(\"ObservationTime\")) \\\n .repartition(\"SupplierId\") \\\n .write \\\n .partitionBy(\"SupplierId\") \\\n .format(\"delta\") \\\n .mode(\"append\") \\\n .save(output_delta_lake_path)", "def logentry(jobid, label, typ, content=None, path=None):\n ud = str(uuid.uuid4())\n db.logs.save({\"uuid\":ud, \"jobid\":jobid, \"label\":label, \"type\":typ, \"content\":content, \"date\":tstamp(), \"containerurl\":path})", "def load(lb_id, backend='memory'):\n return b_api.fetch(backend).logbook_get(lb_id)", "def dump_to_log(self):\n # self._send_request(\"/dumpToLog\")\n pass", "def store(status, message, **kwargs):\r\n LogRecord(status, message, **kwargs)", "def log_mounts_info(mounts):\n if isinstance(mounts, str):\n mounts = [mounts]\n\n g.log.info(\"Start logging mounts information:\")\n for mount_obj in mounts:\n g.log.info(\"Information of mount %s:%s\", mount_obj.client_system,\n mount_obj.mountpoint)\n # Mount Info\n g.log.info(\"Look For Mountpoint:\\n\")\n cmd = \"mount | grep %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Disk Space Usage\n g.log.info(\"Disk Space Usage Of Mountpoint:\\n\")\n cmd = \"df -h %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Long list the mountpoint\n g.log.info(\"List Mountpoint Entries:\\n\")\n cmd = \"ls -ld %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Stat mountpoint\n g.log.info(\"Mountpoint Status:\\n\")\n cmd = \"stat %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)", "def putlog(self,s):\n if not self.logqueue == None:\n# print s\n self.logqueue.put(\"Spectrum: (\"+time.ctime()+\"):\\n\"+s)", "def set_size_based_rotating_log(log_path=LOGGER_PATH, log_name=LOGGER_FILENAME):\n\n # Checks if the path exists otherwise tries to create it\n if not os.path.exists(log_path):\n try:\n os.makedirs(log_path)\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n print(exc_type, exc_value, exc_traceback)\n\n # Sets the format and level of logging records\n format = '%(asctime)s - %(levelname)s - [%(pathname)s:%(lineno)s - %(funcName)10s() ] - %(message)s'\n formatter = Formatter(format)\n level=logging.DEBUG\n\n # Does basic configuration for the logging system\n logging.basicConfig(format=format, level=level)\n\n # Instantiates a logger object\n logger = logging.getLogger(\"\")\n\n handler = logging.handlers.RotatingFileHandler(filename=log_path+'/'+log_name,\n maxBytes=LOGGER_MAX_SIZE,\n backupCount=LOGGER_MAX_FILES)\n 
handler.setFormatter(formatter)\n handler.rotator = rotator\n handler.namer = namer\n logger.addHandler(handler)\n return logger", "def test_logging_log_rotate_for_mysql(\n self,\n admin_node,\n k8s_actions,\n show_step,\n os_deployed):\n logger.info('Log rotate for mysql')\n log_path = '/var/log/ccp/mysql/'\n log_file = 'mysql.log'\n\n show_step(1)\n # get cron pod\n cron_pod = [pod for pod in k8s_actions.api.pods.list(\n namespace=ext.Namespace.BASE_NAMESPACE)\n if 'cron-' in pod.name][0]\n # clean files\n utils.rm_files(admin_node, cron_pod, log_path + log_file + '*')\n\n show_step(2)\n for day in range(0, 8):\n utils.create_file(admin_node, cron_pod, log_path + log_file, 110)\n utils.run_daily_cron(admin_node, cron_pod, 'logrotate')\n sleep(5)\n\n show_step(3)\n log_files = utils.list_files(\n admin_node, cron_pod, log_path, log_file + '*')\n assert len(log_files) == 7,\\\n \"Count of log files after rotation is wrong. \" \\\n \"Expected {} Actual {}\".format(log_files, 7)", "def log(cmdDict, kind, error=None):\n\n cmdDict['timestamp'] = str(int(time.time() * 1000))\n cmdDict['logType'] = kind\n cmdDict['_id'] = cmdDict['transactionNumber'] + cmdDict['user'] + cmdDict['command'] + cmdDict['timestamp'] + cmdDict['logType']\n\n if (error != None):\n cmdDict['errorMessage'] = error\n\n try:\n Database.insert(TRANSACT_COLLECT, cmdDict)\n\n except pymongo.errors.PyMongoError as err:\n print(f\"ERROR! Could not log command into database. Failed with: {err}\")", "def upload_logs():\n return {\n 'page': 'upload_logs',\n 'raw_logs': '',\n }", "def logMemoryStats():\n class MemoryStatusEx(ctypes.Structure):\n \"\"\" MEMORYSTATUSEX \"\"\"\n kaFields = [\n ( 'dwLength', ctypes.c_ulong ),\n ( 'dwMemoryLoad', ctypes.c_ulong ),\n ( 'ullTotalPhys', ctypes.c_ulonglong ),\n ( 'ullAvailPhys', ctypes.c_ulonglong ),\n ( 'ullTotalPageFile', ctypes.c_ulonglong ),\n ( 'ullAvailPageFile', ctypes.c_ulonglong ),\n ( 'ullTotalVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailExtendedVirtual', ctypes.c_ulonglong ),\n ];\n _fields_ = kaFields; # pylint: disable=invalid-name\n\n def __init__(self):\n super(MemoryStatusEx, self).__init__();\n self.dwLength = ctypes.sizeof(self);\n\n try:\n oStats = MemoryStatusEx();\n ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(oStats));\n except:\n reporter.logXcpt();\n return False;\n\n reporter.log('Memory statistics:');\n for sField, _ in MemoryStatusEx.kaFields:\n reporter.log(' %32s: %s' % (sField, getattr(oStats, sField)));\n return True;", "def get_dismount_mount_record(r, start_time):\n retv = {'state': 'M',\n 'node': r['node'],\n 'volume': r['volume'],\n 'type': r['type'],\n 'logname': r['logname'],\n 'start': start_time,\n 'finish': timeutil.wind_time(start_time, seconds=20, backward=False),\n 'storage_group': 'PROBE_UTILITY',\n 'reads': 0,\n 'writes':0\n }\n return retv", "def azure_fetch_and_push_to_influx(token, azure_data_store, file_path, tag_map, influx_settings):\n df = read_df_from_azure(azure_data_store, file_path, token)\n (host, port, user, password, db_name, batch_size) = influx_settings\n #df=df.resample('1Min').mean()\n write_to_influx(df, tag_map, host, port, user, password, db_name, batch_size)", "def test_metering_database(self):\n self.with_deletion = self.input.param(\"delete\", False)\n self.db_name = \"%s-testmetering\" % self.db_name\n # validate initial throughput is 5000/3 = 1666\n for bucket in self.cluster.buckets:\n print(bucket.servers)\n 
self.assertEqual(self.bucket_util.get_throttle_limit(bucket),\n self.bucket_throttling_limit)\n\n # validate create, update, delete stat\n for op_type in [\"create\", \"update\"]:\n if op_type == \"create\":\n self.load_data(create_start=0, create_end=self.num_items, create_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if op_type == \"update\":\n self.load_data(update_start=0, update_end=self.num_items, update_perc=100, mutated=1)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if self.with_deletion:\n self.log.info(\"performing delete operation\")\n self.load_data(delete_start=0, delete_end=self.num_items, delete_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)", "def test_data_store_local_new_index(tcex: TcEx):\n data = {'one': 1}\n key = str(uuid.uuid4())\n rid = 'one'\n\n ds = tcex.api.tc.v2.datastore('local', key)\n\n results = ds.add(rid=rid, data=data)\n if results is None:\n assert False, 'datastore add returned None'\n assert results.get('_id') == rid\n assert results.get('_shards', {}).get('successful') == 1", "def send_logs(self):\n for i in range(30):\n with open('{}-{}.log'.format(self._log_file_path, i), 'a') as log_file:\n for _ in range(self._log_rate):\n log_file.write(self._log_record + '\\n')", "def on_L3(self):\r\n self.log()", "def fetch_data_logger(fn):\n @functools.wraps(fn)\n def wrapper():\n\n data = fn()\n\n create_or_update_log(data, LOG_TYPES['D'])\n\n return data\n return wrapper", "def list_log_files(fs, devices, start_times, verbose=True, passwords={}):\n import canedge_browser\n\n log_files = []\n\n if len(start_times):\n for idx, device in enumerate(devices):\n start = start_times[idx]\n log_files_device = canedge_browser.get_log_files(fs, [device], start_date=start, passwords=passwords)\n log_files.extend(log_files_device)\n\n if verbose:\n print(f\"Found {len(log_files)} log files\\n\")\n\n return log_files", "def main():\n global tar_file_descr\n\n help_msg = 'Usage: log_collector.py <all | host1[,host2,host3...]>'\n hosts = []\n if len(sys.argv) == 2:\n if '-h' == sys.argv[1] or '--help' == sys.argv[1]:\n print(help_msg)\n sys.exit(0)\n elif 'all' == sys.argv[1]:\n # get logs from all hosts\n hosts = []\n host_objs = CLIENT.host_get_all()\n for host_obj in host_objs:\n hosts.append(host_obj.name)\n else:\n # get logs from specified hosts\n hostnames = sys.argv[1].split(',')\n for host in hostnames:\n if host not in hosts:\n hosts.append(host)\n else:\n print(help_msg)\n sys.exit(1)\n\n # open tar file for storing logs\n fd, tar_path = tempfile.mkstemp(prefix='kolla_support_logs_',\n suffix='.tgz')\n os.close(fd) # avoid fd leak\n\n with tarfile.open(tar_path, 'w:gz') as tar_file_descr:\n # clear out old logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n os.mkdir(LOGDIR)\n\n # gather logs from selected hosts\n try:\n for host in hosts:\n get_logs_from_host(host)\n\n # tar up all the container logs\n tar_file_descr.add(LOGDIR, arcname='container_logs')\n finally:\n # remove uncompressed logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n\n # gather dump output from kolla-cli\n dump_kolla_info()\n\n print('Log collection complete. 
Logs are at %s' % tar_path)", "def log_store(self) -> Optional[str]:\n return pulumi.get(self, \"log_store\")", "def __enable_log_streaming():\n # Enable BigQuery service for the project:\n run_command('gcloud services enable bigquery --project {}'.format(PROJECT_ID))\n\n # Create the BigQuery dataset to store the logs:\n # For details refer to https://cloud.google.com/bigquery/docs/datasets#bigquery-create-dataset-cli\n run_command('bq mk --data_location {} --description \\\"Cloud logging export.\\\" {}'\n .format(LOGS_LOCATION, LOGS_SINK_DATASET_ID), 'already exists')\n\n # Set up a log sink to the above-created BigQuery dataset:\n output_message = run_command('gcloud logging sinks create {} {} --project {} \\\n --log-filter=\\'resource.type=\\\"gcs_bucket\\\" OR resource.type=\\\"project\\\"\\''\n .format(LOGS_SINK_NAME, LOGS_SINK_DESTINATION, PROJECT_ID))\n\n # The service account that will be writing to BQ dataset is listed by the previous command.\n # After extracting the service account from the message, you need to give it BQ Writer role to that service account.\n __set_dataset_access( __find_service_account_in_message(output_message) )", "def get_storage_info(self, address: str, level: StorageLevel):", "def local(self, text):\n\t\tlogf = open(\"update_log.txt\", \"a\")\n\t\tdate = datetime.datetime.now()\n\t\tlogf.writelines(\"[\" + str(date) + \"] \" + text + \"\\n\")\n\t\tlogf.close()", "def log_it(logdata=None):\n with open(\"bloomhack.log\", \"a\") as fp:\n fp.write(logdata)\n return", "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))", "def node_storage(self, node):\n def listdir(path, only_dirs=False):\n ents = node.account.sftp_client.listdir(path)\n if not only_dirs:\n return ents\n paths = map(lambda fn: (fn, os.path.join(path, fn)), ents)\n return [p[0] for p in paths if node.account.isdir(p[1])]\n\n store = NodeStorage(RedpandaService.DATA_DIR)\n for ns in listdir(store.data_dir, True):\n if ns == '.coprocessor_offset_checkpoints':\n continue\n ns = store.add_namespace(ns, os.path.join(store.data_dir, ns))\n for topic in listdir(ns.path):\n topic = ns.add_topic(topic, os.path.join(ns.path, topic))\n for num in listdir(topic.path):\n partition = topic.add_partition(\n num, node, os.path.join(topic.path, num))\n partition.add_files(listdir(partition.path))\n return store", "def snapshot(snapshot_type, result_q, time_delta):", "def _log_process(ip_or_url,\n username,\n password,\n database,\n series_name,\n port=8086,\n polling_interval=1,\n retention_duration=MEASUREMENTS_RETENTION_DURATION,\n **tags):\n logger = _logger()\n logger.info('Logging GPU to Database {}'.format(ip_or_url))\n\n kwargs = {'series_name': series_name,\n 'polling_interval': polling_interval,\n 'retention_duration': retention_duration}\n kwargs.update(tags)\n p = Process(target=_start_logger_process,\n args=(ip_or_url,\n port,\n username,\n password,\n database),\n kwargs=kwargs)\n p.start()\n yield p\n p.terminate()\n p.join()", "def test_start_new_log(self):\n old_log_file_name = self.device.log_file_name\n 
self.device.start_new_log(log_name_prefix=self.get_log_suffix())\n self.assertNotEqual(\n old_log_file_name, self.device.log_file_name,\n f\"Expected log file name to change from {old_log_file_name}\")\n self.assertTrue(\n os.path.exists(old_log_file_name),\n f\"Expected old log file name {old_log_file_name} to exist\")\n self.assertTrue(\n os.path.exists(self.device.log_file_name),\n f\"Expected new log file name {self.device.log_file_name} to exist\")" ]
[ "0.62486845", "0.60031223", "0.53603905", "0.53379613", "0.5311335", "0.52926815", "0.5282698", "0.52791435", "0.5279122", "0.525997", "0.52146244", "0.52052414", "0.5137806", "0.51062226", "0.5093556", "0.5093556", "0.5088433", "0.5079323", "0.50690126", "0.5067886", "0.5049035", "0.5046898", "0.50420696", "0.50331473", "0.5030297", "0.5030297", "0.5022431", "0.5016063", "0.49990737", "0.4972949", "0.4968265", "0.4965747", "0.49598062", "0.49484637", "0.49194708", "0.49178702", "0.4905472", "0.49004963", "0.48970413", "0.489522", "0.48909563", "0.4887827", "0.48821267", "0.4881891", "0.48785982", "0.48678872", "0.48652878", "0.48639703", "0.48581663", "0.48568252", "0.4854318", "0.48435727", "0.48408124", "0.48365796", "0.4836489", "0.48329967", "0.4823732", "0.4806852", "0.4805655", "0.48033065", "0.480192", "0.4796162", "0.4775947", "0.47686175", "0.4759035", "0.47575393", "0.47434506", "0.47358102", "0.47347638", "0.47308475", "0.47239348", "0.47207734", "0.47131824", "0.47117183", "0.47061667", "0.4702254", "0.4699498", "0.4697398", "0.46924555", "0.46898118", "0.4685107", "0.46848604", "0.46757394", "0.4671213", "0.46672994", "0.4666417", "0.46534035", "0.4653088", "0.46438026", "0.4637459", "0.4635404", "0.46326485", "0.46303207", "0.46281067", "0.4626461", "0.4609743", "0.45968643", "0.45966214", "0.45938614", "0.45920384", "0.45898995" ]
0.0
-1
This operation supports sharded cluster instances only.
def describe_sharding_network_address_with_options( self, request: dds_20151201_models.DescribeShardingNetworkAddressRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.DescribeShardingNetworkAddressResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.node_id): query['NodeId'] = request.node_id if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='DescribeShardingNetworkAddress', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.DescribeShardingNetworkAddressResponse(), self.call_api(params, req, runtime) )
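A minimal invocation sketch for the operation above, assuming the generated alibabacloud_dds20151201 Python SDK and an already-initialized client (credentials and endpoint configured elsewhere); the DBInstanceId value is a placeholder and must refer to a sharded cluster instance, since this operation supports sharded cluster instances only.

# Usage sketch (assumptions: a configured Client named `client`; the instance ID is a placeholder).
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_tea_util import models as util_models

request = dds_20151201_models.DescribeShardingNetworkAddressRequest(
    dbinstance_id='dds-bp1xxxxxxxxxxxx',  # placeholder; must identify a sharded cluster instance
)
runtime = util_models.RuntimeOptions()
response = client.describe_sharding_network_address_with_options(request, runtime)
print(response.body)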
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cluster(self):\n assert False", "def cluster_myshardid(self, target_nodes=None):\n return self.execute_command(\"CLUSTER MYSHARDID\", target_nodes=target_nodes)", "def delete_cluster(self):", "def run_cluster(autoscaling: bool = False, **options) -> None:\n if autoscaling:\n thread = AutoScalingCluster.new(**options)\n else:\n thread = RemoteCluster.new(**options)\n try:\n thread.join()\n except Exception:\n thread.stop()\n raise", "def create_redshift_cluster(config, redshift_role):\n redshift = create_boto3_client(config, 'redshift')\n cluster_identifier = config.get('CLUSTER', 'CLUSTER_IDENTIFIER')\n print(\"Creating redshift cluster: %s\" % cluster_identifier)\n try:\n cc_response = redshift.create_cluster(\n MasterUsername=config.get('CLUSTER', 'DB_USER'),\n MasterUserPassword=config.get('CLUSTER', 'DB_PASSWORD'),\n ClusterIdentifier=cluster_identifier,\n NodeType=config.get('CLUSTER', 'NODE_TYPE'),\n NumberOfNodes=int(config.get('CLUSTER', 'NODE_COUNT')),\n Port=int(config.get('CLUSTER', 'DB_PORT')),\n IamRoles=[\n redshift_role['Role']['Arn']\n ],\n ClusterSubnetGroupName=config.get('CLUSTER', 'SUBNET_GROUP'),\n ClusterSecurityGroups=[config.get('CLUSTER', 'SECURITY_GROUP_ID')]\n )\n print('Creating Cluster:', cc_response)\n except ClientError as e:\n if e.response['Error']['Code'] == 'ClusterAlreadyExists':\n print(\"Cluster %s already exists\" % cluster_identifier)\n return\n else:\n print(\"Unexpected error wile creating cluster: %s\" % e)\n\n print('Sleep 5 seconds')\n time.sleep(5)\n while True:\n print('Fetching status of cluster..')\n try:\n cluster_status = get_cluster_status(redshift, cluster_identifier)\n if cluster_status['Clusters'][0]['ClusterStatus'] == 'available':\n break\n print('Cluster Status:', cluster_status)\n except ClientError as e:\n print(\"Unexpected error wile getting cluster status: %s\" % e)\n raise e\n print('Sleep 10 seconds')\n time.sleep(10)\n print('Cluster is created and available.')", "def dvs_instances_batch_mix_sg(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n self.show_step(2)\n net_1 = os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network is created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(3)\n self.show_step(4)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n self.icmp[\"security_group_rule\"][\"security_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"remote_group_id\"] = sg1.id\n 
self.icmp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n self.icmp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n\n for sg in [sg1, sg2]:\n self.tcp[\"security_group_rule\"][\"security_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"remote_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n self.tcp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n\n # Add rules for ssh and ping\n os_conn.goodbye_security()\n _s_groups = os_conn.neutron.list_security_groups()['security_groups']\n _srv_tenant_id = os_conn.get_tenant(SERVTEST_TENANT).id\n default_sg = [sg for sg in _s_groups\n if sg['tenant_id'] == _srv_tenant_id and\n sg['name'] == 'default'][0]\n\n for step in (5, 9):\n self.show_step(step) # step 5, 9\n step += 1\n self.show_step(step) # step 6, 10\n step += 1\n\n openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=vm_count,\n security_groups=[sg1.name, sg2.name, default_sg['name']])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(step) # step 7, 11\n step += 1\n\n instances = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, instances)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair,\n timeout=60 * 5,\n interval=10)\n\n self.show_step(step) # step 8, 12\n step += 1\n\n for instance in instances:\n os_conn.nova.servers.delete(instance)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(instance)", "def run_clean_cluster(red_df, remove_df=None):\n cluster_df = cluster.get_cluster_output_df(red_df, False, quantile=0.8)\n return run_split_cluster(cluster_df, remove_df)", "def _create_cluster(self, server_instance):\n return Cluster([server_instance])", "def launch_cluster(params):\n logging.info('Launching cluster of size: {} and type: {}'.format(params.cluster_size, params.instance_type))\n subprocess.check_call(['cgcloud',\n 'create-cluster',\n '--leader-instance-type', 'm3.medium',\n '--instance-type', params.instance_type,\n '--share', params.shared_dir,\n '--num-workers', str(params.cluster_size),\n '-c', params.cluster_name,\n '--spot-bid', str(params.spot_price),\n '--leader-on-demand',\n '--ssh-opts',\n '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no',\n 'toil'])", "def test_patch_cluster_role(self):\n pass", "def recluster(cluster, min_size, guard, func):\r\n if cluster.get_length() == 0:\r\n return\r\n if cluster.get_length() <= min_size:\r\n return cluster\r\n sim = func(cluster.get_tweets())\r\n if sim < guard:\r\n kmeans = TweetKMeans(2)\r\n kmeans.set_data(cluster.get_tweets())\r\n return kmeans.start_algorithm()\r\n return cluster", "def launch_cluster(\n descs: List[MachineDesc],\n *,\n nonce: Optional[str] = None,\n key_name: Optional[str] = None,\n security_group_name: str = DEFAULT_SECURITY_GROUP_NAME,\n instance_profile: Optional[str] = DEFAULT_INSTANCE_PROFILE_NAME,\n extra_tags: Dict[str, str] = {},\n delete_after: datetime.datetime,\n git_rev: str = \"HEAD\",\n extra_env: Dict[str, str] = {},\n) -> List[Instance]:\n\n if not nonce:\n nonce = util.nonce(8)\n\n instances = [\n launch(\n key_name=key_name,\n instance_type=d.instance_type,\n ami=d.ami,\n 
ami_user=d.ami_user,\n tags={**d.tags, **extra_tags},\n display_name=f\"{nonce}-{d.name}\",\n size_gb=d.size_gb,\n security_group_name=security_group_name,\n instance_profile=instance_profile,\n nonce=nonce,\n delete_after=delete_after,\n )\n for d in descs\n ]\n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(\n asyncio.gather(\n *(\n setup(i, git_rev if d.checkout else \"HEAD\")\n for (i, d) in zip(instances, descs)\n )\n )\n )\n\n hosts_str = \"\".join(\n (f\"{i.private_ip_address}\\t{d.name}\\n\" for (i, d) in zip(instances, descs))\n )\n for i in instances:\n mssh(i, \"sudo tee -a /etc/hosts\", input=hosts_str.encode())\n\n env = \" \".join(f\"{k}={shlex.quote(v)}\" for k, v in extra_env.items())\n for (i, d) in zip(instances, descs):\n if d.launch_script:\n mssh(\n i,\n f\"(cd materialize && {env} nohup bash -c {shlex.quote(d.launch_script)}) &> mzscratch.log &\",\n )\n\n return instances", "def resize_cluster(ctx, project_name, cluster_name, instance_size_name):\n project = ctx.obj.groups.byName[project_name].get().data\n\n new_cluster_config = {\n 'clusterType': 'REPLICASET',\n 'providerSettings': {\n 'providerName': 'AWS',\n 'regionName': 'US_WEST_1',\n 'instanceSizeName': instance_size_name}}\n\n cluster = ctx.obj.groups[project.id].clusters[cluster_name].patch(\n **new_cluster_config)\n pprint(cluster.data)", "def test_replace_cluster_role(self):\n pass", "def running_cluster(request):\n cluster_name = 'running-cluster-' + random_string()\n launch_cluster(\n cluster_name=cluster_name,\n instance_type=request.param.instance_type,\n spark_version=request.param.spark_version,\n spark_git_commit=request.param.spark_git_commit)\n\n if request.param.restarted:\n stop_cluster(cluster_name)\n start_cluster(cluster_name)\n\n def destroy():\n p = subprocess.run([\n 'flintrock', 'destroy', cluster_name, '--assume-yes'])\n assert p.returncode == 0\n request.addfinalizer(destroy)\n\n return cluster_name", "def create_cluster(ctx, project_name, cluster_name, instance_size_name):\n project = ctx.obj.groups.byName[project_name].get().data\n\n cluster_config = {\n 'name': cluster_name,\n 'clusterType': 'REPLICASET',\n 'providerSettings': {\n 'providerName': 'AWS',\n 'regionName': 'US_WEST_1',\n 'instanceSizeName': instance_size_name}}\n\n cluster = ctx.obj.groups[project.id].clusters.post(**cluster_config)\n pprint(cluster.data)", "def test_create_cluster_role(self):\n pass", "def _get_cluster_list(self):\n return self.__cluster_list", "def cluster_shards(self, target_nodes=None):\n return self.execute_command(\"CLUSTER SHARDS\", target_nodes=target_nodes)", "def dvs_instances_one_group(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(2)\n self.show_step(3)\n openstack.create_instances(os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n vm_count=vm_count,\n security_groups=[security_group.name])\n 
openstack.verify_instance_state(os_conn)\n\n self.show_step(4)\n srv_list = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, srv_list)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair)\n\n self.show_step(5)\n for srv in srv_list:\n os_conn.nova.servers.delete(srv)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(srv)", "def atlas_clusters():\n pass", "def resource_type(self):\n return 'cluster'", "def create_cache_cluster(stack, name, cache_type, vpc, cidrs, subnet_ids,\n instance_type, num_cache_clusters):\n ports = {'redis': 6379, 'memcached': 11211}\n ingress = []\n\n for idx, cidr in enumerate(cidrs):\n ingress.append(\n SecurityGroupRule(\n '{0}{1}{2}'.format(name.replace('-', ''), cache_type, idx),\n CidrIp=cidr,\n FromPort=ports[cache_type],\n ToPort=ports[cache_type],\n IpProtocol='tcp',\n ))\n\n secgroup = stack.stack.add_resource(\n SecurityGroup(\n '{0}{1}SecurityGroup'.format(name.replace('-', ''), cache_type),\n GroupDescription='{0} {1} Security Group'.format(name, cache_type),\n SecurityGroupIngress=ingress,\n SecurityGroupEgress=[\n SecurityGroupRule(\n '{0}egress'.format(name.replace('-', '')),\n CidrIp='0.0.0.0/0',\n IpProtocol='-1')\n ],\n VpcId=vpc,\n ))\n\n subnet_group = stack.stack.add_resource(\n elasticache.SubnetGroup(\n '{0}{1}cache'.format(name.replace('-', ''), cache_type),\n Description='{0}{1} cache'.format(name, cache_type),\n SubnetIds=subnet_ids,\n ))\n\n if num_cache_clusters > 1:\n stack.stack.add_resource(\n elasticache.ReplicationGroup(\n '{0}CacheCluster'.format(name.replace('-', '')),\n ReplicationGroupId='{0}'.format(name),\n ReplicationGroupDescription='{0}cluster'.format(name),\n Engine='{0}'.format(cache_type),\n EngineVersion='3.2.6',\n CacheNodeType=instance_type,\n NumCacheClusters=num_cache_clusters,\n CacheSubnetGroupName=Ref(subnet_group),\n SecurityGroupIds=[Ref(secgroup)],\n AtRestEncryptionEnabled=True))\n else:\n stack.stack.add_resource(\n elasticache.CacheCluster(\n '{0}CacheCluster'.format(name.replace('-', '')),\n ClusterName='{0}'.format(name),\n Engine='{0}'.format(cache_type),\n EngineVersion='3.2.10',\n CacheNodeType=instance_type,\n NumCacheNodes=num_cache_clusters,\n VpcSecurityGroupIds=[Ref(secgroup)],\n CacheSubnetGroupName=Ref(subnet_group)))", "def cluster_by_partitioning(active_sites):\n cls, sc = k_means(active_sites)\n\n return cls", "def test_replace_cluster_resource_quota(self):\n pass", "def launch_cluster(**overrides) -> dict:\n if os.path.isfile(META_FILE):\n raise FileExistsError(\"Cluster already exists!\")\n\n config = DEFAULT_CONFIG.copy()\n config.update(**overrides)\n\n sg = make_sg()\n config[\"Instances\"].update(AdditionalMasterSecurityGroups=[sg.id])\n emr = get_emr_client()\n\n response = emr.run_job_flow(**config)\n cluster_id = response[\"JobFlowId\"]\n master_addr = wait_init(cluster_id)\n\n meta = {\n \"MasterNodeAddr\": master_addr,\n \"ClusterId\": cluster_id,\n \"SGId\": sg.id\n }\n with open(META_FILE, \"w\") as f:\n json.dump(meta, f)\n\n print(\"INFO: Cluster Launched!\")\n return meta", "def startCluster():\n # attempt to create a cluster\n print(\"Creating a Redshift cluster...\")\n try:\n redshift.create_cluster(\n\n # hardware parameters\n ClusterType=DWH_CLUSTER_TYPE,\n NodeType=DWH_NODE_TYPE,\n NumberOfNodes=int(DWH_NUM_NODES),\n\n # database access configuration\n DBName=DWH_DB,\n 
ClusterIdentifier=DWH_CLUSTER_IDENTIFIER,\n MasterUsername=DWH_DB_USER,\n MasterUserPassword=DWH_DB_PASSWORD,\n\n # accesses\n IamRoles=[iam.get_role(RoleName=DWH_IAM_ROLE_NAME)[\"Role\"][\"Arn\"]]\n )\n except Exception as e:\n print(e)\n return\n\n # wait for cluster to spin up\n print(\"Waiting for cluster to be available...\")\n while redshift.describe_clusters(\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER\n )[\"Clusters\"][0][\"ClusterStatus\"] != \"available\":\n time.sleep(30)\n print(\"\\tChecking status again...\")", "def remove_cluster(config, nova, neutron, cinder, conn):\n\n cluster_info = OSClusterInfo(nova, neutron, cinder, config, conn)\n masters = cluster_info.get_instances(\"node\")\n workers = cluster_info.get_instances(\"master\")\n\n tasks = [host.delete(neutron) for host in masters if host]\n tasks += [host.delete(neutron) for host in workers if host]\n if tasks:\n LOGGER.debug(\"Deleting Instances ...\")\n loop = asyncio.get_event_loop()\n loop.run_until_complete(asyncio.wait(tasks))\n loop.close()\n\n LoadBalancer(config, conn).delete()\n\n sg_name = '%s-sec-group' % config['cluster-name']\n secg = conn.list_security_groups({\"name\": sg_name})\n if secg:\n LOGGER.debug(\"Deleting SecurityGroup %s ...\", sg_name)\n for sg in secg:\n for rule in sg.security_group_rules:\n conn.delete_security_group_rule(rule['id'])\n\n for port in conn.list_ports():\n if sg.id in port.security_groups:\n conn.delete_port(port.id)\n conn.delete_security_group(sg_name)\n\n # This needs to be replaced with OpenStackAPI in the future\n for vol in cinder.volumes.list():\n try:\n if config['cluster-name'] in vol.name and vol.status != 'in-use':\n try:\n vol.delete()\n except (BadRequest, NotFound):\n pass\n\n except TypeError:\n continue\n\n # delete the cluster key pair\n conn.delete_keypair(config['cluster-name'])", "def cluster_timeseries(X, roi_mask_nparray, n_clusters, similarity_metric, affinity_threshold, cluster_method = 'ward'):\n \n import sklearn as sk\n from sklearn import cluster, datasets, preprocessing\n import scipy as sp\n import time \n from sklearn.cluster import FeatureAgglomeration\n from sklearn.feature_extraction import image\n\n \n print('Beginning Calculating pairwise distances between voxels')\n \n X = np.array(X)\n X_dist = sp.spatial.distance.pdist(X.T, metric = similarity_metric)\n \n temp=X_dist\n temp[np.isnan(temp)]=0\n tempmax=temp.max()\n \n X_dist = sp.spatial.distance.squareform(X_dist)\n X_dist[np.isnan(X_dist)]=tempmax\n #import pdb;pdb.set_trace()\n sim_matrix=1-sk.preprocessing.normalize(X_dist, norm='max')\n sim_matrix[sim_matrix<affinity_threshold]=0\n #import pdb;pdb.set_trace()\n if cluster_method == 'ward':\n # ## BEGIN WARD CLUSTERING CODE \n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n if roi_mask_nparray!='empty':\n #import pdb; pdb.set_trace()\n shape = roi_mask_nparray.shape\n connectivity = image.grid_to_graph(n_x=shape[0], n_y=shape[1],\n n_z=shape[2], mask=roi_mask_nparray)\n \n ward = FeatureAgglomeration(n_clusters=n_clusters, connectivity=connectivity,\n linkage='ward')\n ward.fit(sim_matrix)\n y_pred = ward.labels_.astype(np.int)\n else:\n print(\"Calculating Hierarchical Clustering\")\n ward = FeatureAgglomeration(n_clusters=n_clusters, affinity='euclidean', linkage='ward') \n ward.fit(sim_matrix)\n y_pred = ward.labels_.astype(np.int)\n \n# # END WARD CLUSTERING CODE \n else:\n \n print(\"spectral\")\n print(\"spectral\")\n 
print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n #cluster_method== 'spectral':\n #Spectral method\n spectral = cluster.SpectralClustering(n_clusters, eigen_solver='arpack', random_state = 5, affinity=\"precomputed\", assign_labels='discretize') \n spectral.fit(sim_matrix)\n y_pred = spectral.labels_.astype(np.int) \n\n# \n # BEGIN SPECTRAL CLUSTERING CODE \n \n # END SPECTRAL CLUSTERING CODE \n\n\n\n return y_pred", "def create_cluster(df,validate, test, X, k, name):\n \n scaler = StandardScaler(copy=True).fit(df[X])\n X_scaled = pd.DataFrame(scaler.transform(df[X]), columns=df[X].columns.values).set_index([df[X].index.values])\n kmeans = KMeans(n_clusters = k, random_state = 42)\n kmeans.fit(X_scaled)\n kmeans.predict(X_scaled)\n df[name] = kmeans.predict(X_scaled)\n df[name] = 'cluster_' + df[name].astype(str)\n \n v_scaled = pd.DataFrame(scaler.transform(validate[X]), columns=validate[X].columns.values).set_index([validate[X].index.values])\n validate[name] = kmeans.predict(v_scaled)\n validate[name] = 'cluster_' + validate[name].astype(str)\n \n t_scaled = pd.DataFrame(scaler.transform(test[X]), columns=test[X].columns.values).set_index([test[X].index.values])\n test[name] = kmeans.predict(t_scaled)\n test[name] = 'cluster_' + test[name].astype(str)\n \n centroids = pd.DataFrame(scaler.inverse_transform(kmeans.cluster_centers_), columns=X_scaled.columns)\n return df, X_scaled, scaler, kmeans, centroids", "def test_patch_hyperflex_cluster(self):\n pass", "def test_read_cluster_role(self):\n pass", "def main():\n rs = redshift(config_file=CONFIG_FILENAME)\n \n # check if cluster already available\n try:\n clust_avail = check_available(rs)\n except rs_client.exceptions.ClusterNotFoundFault:\n clust_avail = False\n\n # if cluster not available, create it\n if not clust_avail:\n create_cluster(rs) \n \n print(f'Cluster is available. 
Cluster information: \\n{rs.get_cluster_info()}')", "def _setup_cluster(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "def _load_cluster(self):", "def cross_cluster_timeseries(data1, data2, roi_mask_nparray, n_clusters, similarity_metric, affinity_threshold, cluster_method = 'ward'):\n \n \n \n import scipy as sp\n import time\n import sklearn as sk\n from sklearn import cluster, datasets, preprocessing\n from sklearn.cluster import FeatureAgglomeration\n from sklearn.feature_extraction import image\n\n \n \n print(\"Calculating Cross-clustering\")\n print(\"Calculating pairwise distances between areas\")\n \n dist_btwn_data_1_2 = np.array(sp.spatial.distance.cdist(data1.T, data2.T, metric = similarity_metric))\n sim_btwn_data_1_2=1-dist_btwn_data_1_2\n sim_btwn_data_1_2[np.isnan(sim_btwn_data_1_2)]=0\n sim_btwn_data_1_2[sim_btwn_data_1_2<affinity_threshold]=0\n\n print(\"Calculating pairwise distances between voxels in ROI 1 \")\n dist_of_1 = sp.spatial.distance.pdist(sim_btwn_data_1_2, metric = 'euclidean')\n dist_matrix = sp.spatial.distance.squareform(dist_of_1)\n sim_matrix=1-sk.preprocessing.normalize(dist_matrix, norm='max')\n sim_matrix[sim_matrix<affinity_threshold]=0\n\n\n if cluster_method == 'ward':\n # ## BEGIN WARD CLUSTERING CODE \n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n if roi_mask_nparray!='empty':\n #import pdb; pdb.set_trace()\n shape = roi_mask_nparray.shape\n connectivity = image.grid_to_graph(n_x=shape[0], n_y=shape[1],\n n_z=shape[2], mask=roi_mask_nparray)\n \n ward = FeatureAgglomeration(n_clusters=n_clusters, connectivity=connectivity,\n linkage='ward')\n ward.fit(sim_matrix)\n y_pred = ward.labels_.astype(np.int)\n else:\n print(\"Calculating Hierarchical Cross-clustering\")\n ward = FeatureAgglomeration(n_clusters=n_clusters, affinity='euclidean', linkage='ward') \n ward.fit(sim_matrix)\n y_pred = ward.labels_.astype(np.int)\n \n # # END WARD CLUSTERING CODE \n else:\n \n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n #cluster_method== 'spectral':\n #Spectral method\n spectral = cluster.SpectralClustering(n_clusters, eigen_solver='arpack', random_state = 5, affinity=\"precomputed\", assign_labels='discretize') \n spectral.fit(sim_matrix)\n y_pred = spectral.labels_.astype(np.int) \n\n# \n # BEGIN SPECTRAL CLUSTERING CODE \n \n # END SPECTRAL CLUSTERING CODE \n\n\n\n# sim_matrix[np.isnan((sim_matrix))]=0\n# sim_matrix[sim_matrix<0]=0\n# sim_matrix[sim_matrix>1]=1\n\n ## BEGIN WARD CLUSTERING CODE \n# print(\"Calculating Hierarchical Cross-clustering\")\n# ward = FeatureAgglomeration(n_clusters=n_clusters, affinity='euclidean', linkage='ward') \n# ward.fit(sim_matrix)\n# y_pred = ward.labels_.astype(np.int)\n# \n ## END WARD CLUSTERING CODE \n \n# # BEGIN SPECTRAL CLUSTERING CODE \n# spectral = cluster.SpectralClustering(n_clusters, eigen_solver='arpack', random_state = 5, affinity=\"precomputed\", assign_labels='discretize') \n# spectral.fit(sim_matrix)\n# y_pred = spectral.labels_.astype(np.int)\n# # END SPECTRAL CLUSTERING CODE \n \n return y_pred", "def test_create_cluster_resource_quota(self):\n pass", "def index(self):\n\n if self.cluster:\n self.cluster.index()\n else:\n super().index()", "def test_list_cluster_role(self):\n pass", "def testSharded(self):\n np.random.seed(0)\n 
num_classes = 5\n batch_size = 3\n\n for num_true in range(1, 5):\n labels = np.random.randint(\n low=0, high=num_classes, size=batch_size * num_true)\n (weights, biases, hidden_acts, sampled_vals, exp_logits,\n exp_labels) = self._GenerateTestData(\n num_classes=num_classes,\n dim=10,\n batch_size=batch_size,\n num_true=num_true,\n labels=labels,\n sampled=[1, 0, 2, 3],\n subtract_log_q=False)\n weight_shards, bias_shards = self._ShardTestEmbeddings(\n weights, biases, num_shards=3)\n logits_tensor, labels_tensor = _compute_sampled_logits(\n weights=[constant_op.constant(shard) for shard in weight_shards],\n biases=[constant_op.constant(shard) for shard in bias_shards],\n labels=constant_op.constant(\n labels, dtype=dtypes.int64, shape=(batch_size, num_true)),\n inputs=constant_op.constant(hidden_acts),\n num_sampled=4,\n num_classes=num_classes,\n num_true=num_true,\n sampled_values=sampled_vals,\n subtract_log_q=False,\n remove_accidental_hits=False,\n partition_strategy=\"div\",\n name=\"sampled_logits_sharded_num_true_%d\" % num_true)\n got_logits, got_labels = self.evaluate([logits_tensor, labels_tensor])\n self.assertAllClose(exp_logits, got_logits, self._eps)\n self.assertAllClose(exp_labels, got_labels, self._eps)", "def create_cluster(rs):\n\n rs.create_cluster(verbose=False)\n print('Creating cluster. Will check every 30 seconds for completed creation.')\n cluster_built = False\n while not cluster_built:\n print('Sleeping 30 seconds.')\n time.sleep(30)\n cluster_built = check_available(rs)", "def _cluster(self):\n # , distance_function=spearman_squared_distance, max_iter=1000, tol=0.0001):\n if self.cluster_method is None:\n clusters = KMedoids(\n self.k,\n self.batchsize,\n dist_func=self.distance_function,\n max_iter=self.max_iter,\n tol=self.tol,\n init_medoids=self.init_medoids,\n swap_medoids=self.swap_medoids,\n )\n clusters.fit(self.clustering_attributions, verbose=self.verbose)\n\n self.subpopulations = clusters.members\n self.subpopulation_sizes = GAM.get_subpopulation_sizes(clusters.members)\n self.explanations = self._get_explanations(clusters.centers)\n # Making explanations return numerical values instead of dask arrays\n if isinstance(self.explanations[0][0][1], da.Array):\n explanations = []\n for explanation in self.explanations:\n explanations.append([(x[0], x[1].compute()) for x in explanation])\n self.explanations = explanations\n else:\n self.cluster_method(self)", "def test_crud_cluster(self):\n # create the object\n response = self._create_cluster()\n self.assertEqual(response.status_code, status.HTTP_201_CREATED,\n response.content)\n\n # list the object\n cluster_id = self._list_cluster()\n # Assert that the originally created cluster id is the same as the one\n # returned by list\n self.assertEquals(response.data['id'], cluster_id)\n self.assertEquals(response.data['default_vm_type'], 'm5.24xlarge')\n self.assertEquals(response.data['default_zone']['name'], 'us-east-1b')\n\n # check details\n cluster_id = self._check_cluster_exists(cluster_id)\n\n # update cluster\n response = self._update_cluster(cluster_id)\n self.assertEquals(response['name'], 'new_name')\n\n # delete the object\n response = self._delete_cluster(cluster_id)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data)\n\n # check it no longer exists\n self._check_no_clusters_exist()", "def terminateCluster():\n try:\n # delete cluster\n redshift.delete_cluster(\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER,\n SkipFinalClusterSnapshot=True\n )\n\n # clear up role\n 
iam.detach_role_policy(\n RoleName=DWH_IAM_ROLE_NAME,\n PolicyArn=\"arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess\"\n )\n iam.delete_role(RoleName=DWH_IAM_ROLE_NAME)\n except Exception as e:\n print(e)", "def enableRemoteAccess():\n myClusterProps = redshift.describe_clusters(\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER\n )['Clusters'][0]\n\n # attempt to allow incoming TCP connections to the cluster\n print(\"Enabling incoming TCP connections...\")\n try:\n vpc = ec2.Vpc(id=myClusterProps[\"VpcId\"])\n for sg in vpc.security_groups.all():\n if sg.group_name == \"default\":\n defaultSg = sg\n break\n\n defaultSg.authorize_ingress(\n GroupName=defaultSg.group_name,\n CidrIp=CIDR_IP_ALLOWED,\n IpProtocol='TCP',\n FromPort=int(DWH_PORT),\n ToPort=int(DWH_PORT)\n )\n except Exception as e:\n print(e)\n return\n\n print(\"\\nCluster is ready for access!\")\n endpoint = myClusterProps[\"Endpoint\"][\"Address\"]\n print(f\"Endpoint: {endpoint}\")", "def modifyMastersWithMultipleInstances(self):\n # Nothing to do\n pass", "def test_get_hyperflex_cluster_list(self):\n pass", "def create_redshift_cluster(redshift_client, role_arn):\n # Create the cluster if it doesn't exist.\n try:\n response = redshift_client.create_cluster(\n ClusterType=CLUSTER_TYPE,\n NodeType=NODE_TYPE,\n NumberOfNodes=NUM_NODES,\n DBName=DBNAME,\n ClusterIdentifier=IDENTIFIER,\n MasterUsername=USER,\n MasterUserPassword=PASSWORD,\n IamRoles=[role_arn]\n )\n except Exception as e:\n print(e)", "def cluster_destroy(extra_args=None):\n cmd = [\"pcs\", \"cluster\", \"destroy\"]\n\n if isinstance(extra_args, (list, tuple)):\n cmd += extra_args\n\n log.debug(\"Running cluster destroy: %s\", cmd)\n\n return __salt__[\"cmd.run_all\"](cmd, output_loglevel=\"trace\", python_shell=False)", "def test_replace_cluster_policy(self):\n pass", "def test_eks_worker_node_managed_by_eks(self) -> None:\n response = self.ec2.describe_instances(Filters=[\n {\n 'Name': 'tag:Name',\n 'Values': ['eks-prod']\n }\n ])\n worker_instances = response.get('Reservations')[0].get('Instances')\n self.assertEqual(1, len(worker_instances))", "def launch_example_cluster_cmd(*args, **kwargs):\n return launch_example_cluster(*args, **kwargs)", "def test_delete_cluster_role(self):\n pass", "def cluster_nodes(self) -> ResponseT:\n return self.execute_command(\"CLUSTER NODES\")", "def main():\n parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('--config', required=True, help='Configuration file for run. Must be in shared_dir')\n parser.add_argument('-c', '--cluster_size', required=True, help='Number of workers desired in the cluster.')\n parser.add_argument('-s', '--sample_size', required=True, type=float, help='Size of the sample deisred in TB.')\n parser.add_argument('-t', '--instance_type', default='c3.8xlarge', help='e.g. 
m4.large or c3.8xlarge.')\n parser.add_argument('-n', '--cluster_name', required=True, help='Name of cluster.')\n parser.add_argument('--namespace', default='jtvivian', help='CGCloud NameSpace')\n parser.add_argument('--spot_price', default=0.60, help='Change spot price of instances')\n parser.add_argument('-b', '--bucket', default='tcga-data-cgl-recompute', help='Bucket where data is.')\n parser.add_argument('-d', '--shared_dir', required=True,\n help='Full path to directory with: pipeline script, launch script, config, and master key.')\n params = parser.parse_args()\n\n # Run sequence\n start = time.time()\n # Get number of samples from config\n with open(params.config, 'r') as f:\n num_samples = len(f.readlines())\n # Launch cluster and pipeline\n uuid = fix_launch(params)\n launch_cluster(params)\n ids = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')\n launch_pipeline(params)\n # Blocks until all workers are idle\n stop = time.time()\n # Collect metrics from cluster\n collect_metrics(ids, list_of_metrics, start, stop, uuid=uuid)\n # Apply \"Insta-kill\" alarm to every worker\n map(apply_alarm_to_instance, ids)\n # Kill leader\n logging.info('Killing Leader')\n leader_id = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-leader')[0]\n apply_alarm_to_instance(leader_id, threshold=5)\n # Generate Run Report\n avail_zone = get_avail_zone(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')[0]\n total_cost, avg_hourly_cost = calculate_cost(params.instance_type, ids[0], avail_zone)\n # Report values\n output = ['UUID: {}'.format(uuid),\n 'Number of Samples: {}'.format(num_samples),\n 'Number of Nodes: {}'.format(params.cluster_size),\n 'Cluster Name: {}'.format(params.cluster_name),\n 'Source Bucket: {}'.format(params.bucket),\n 'Average Hourly Cost: ${}'.format(avg_hourly_cost),\n 'Cost per Instance: ${}'.format(total_cost),\n 'Availability Zone: {}'.format(avail_zone),\n 'Start Time: {}'.format(datetime.isoformat(datetime.utcfromtimestamp(start))),\n 'Stop Time: {}'.format(datetime.isoformat(datetime.utcfromtimestamp(stop))),\n 'Total Cost of Cluster: ${}'.format(float(total_cost) * int(params.cluster_size)),\n 'Cost Per Sample: ${}'.format((float(total_cost) * int(params.cluster_size) / int(num_samples)))]\n with open(os.path.join(str(uuid) + '_{}'.format(str(datetime.utcnow()).split()[0]), 'run_report.txt'), 'w') as f:\n f.write('\\n'.join(output))\n # You're done!\n logging.info('\\n\\nScaling Test Complete.')", "def __kmeans__(cls, cluster_size, pca_reduced, names: list):\n import warnings\n warnings.filterwarnings(\"ignore\")\n clusterer = KMeans(n_clusters=cluster_size, random_state=10)\n cluster_labels = clusterer.fit_predict(pca_reduced)\n result = list()\n result.append(ClusterProcessor.davies_bouldin(cluster_labels, pca_reduced, cluster_size, names))\n result.append(ClusterProcessor.variance_ratio_criterion(cluster_labels, pca_reduced, cluster_size, names))\n result.append(ClusterProcessor.silhouette_coefficient(cluster_labels, pca_reduced, cluster_size, names))\n return result", "def test_list_cluster_policy(self):\n pass", "def show_cluster(self):\n if self.controller.cluster:\n self.print_object(\n 'cluster', ('id', 'name', 'status'), self.controller.cluster\n )\n else:\n print(\"There is no cluster.\")", "def clusters(self):\n raise NotImplementedError", "def test_list_cluster_resource_quota(self):\n pass", "def create_instance(ami, sg_name):\n 
instance = None\n ec2 = boto3.resource('ec2',region_name=\"us-east-1\")\n # TODO: Create an EC2 instance\n # Wait for the instance to enter the running state\n # Reload the instance attributes\n\n try:\n instance = ec2.create_instances(\n ImageId=ami,\n InstanceType=INSTANCE_TYPE,\n KeyName=KEY_NAME,\n MaxCount=1,\n MinCount=1,\n SecurityGroupIds=[\n sg_name,\n ],\n TagSpecifications=[{\n 'ResourceType': 'instance',\n 'Tags': TAGS\n }, {\n 'ResourceType': 'volume',\n 'Tags': TAGS\n }]\n )[0]\n instance.wait_until_running()\n instance.reload()\n print(instance.state)\n except ClientError as e:\n print(e)\n\n return instance", "def clustering(cluster_list):\n while len(cluster_list) > 1:\n x = 0\n y = 0\n distance_min = 10\n\n for i in range(0,len(cluster_list)):\n\n for j in range(0,len(cluster_list)):\n\n if i != j:\n distance = cluster_list[i].linkage(cluster_list[j])\n if distance < distance_min:\n x = i\n y = j\n distance_min = distance\n \n \n clusX = cluster_list[x]\n clusY = cluster_list[y]\n cluster_list.pop(cluster_list.index(clusX))\n cluster_list.pop(cluster_list.index(clusY))\n\n cluster_list.append(Cluster(clusX,clusY))\n return cluster_list[0]", "def update_existed_cluster(existed_cluster,sema_cluster,qid,returned_result):\n if qid not in existed_cluster:\n existed_cluster[qid] = set()\n\n for tid in returned_result:\n cluster_id = sema_cluster.get_cluster_id(qid,tid)\n if cluster_id is not None:\n if cluster_id not in existed_cluster[qid]:\n existed_cluster[qid].add(cluster_id)", "def run_split_cluster(cluster_df, keep_df=None, remove_df=None):\n if keep_df is None:\n keep_df = pd.DataFrame(columns=cluster_df.columns)\n if remove_df is None:\n remove_df = pd.DataFrame(columns=cluster_df.columns)\n for label, group_df in cluster_df.groupby([\"label\"]):\n # only keep the maximum cluster\n center_count = {center: len(group_df[group_df.center == center]) for center in group_df.center.unique()}\n max_center = sorted(center_count.items(), key=lambda i: i[1], reverse=True)[0][0]\n keep_df = keep_df.append(group_df[group_df.center == max_center], ignore_index=True)\n remove_df = remove_df.append(group_df[group_df.center != max_center], ignore_index=True)\n return keep_df, remove_df", "def deregister_ecs_cluster(EcsClusterArn=None):\n pass", "def test_patch_cluster_resource_quota(self):\n pass", "def cluster_spec(num_workers, num_ps):\n cluster = {}\n port = 12222\n\n all_ps = []\n host = '127.0.0.1'\n for _ in range(num_ps):\n all_ps.append(\"{}:{}\".format(host, port))\n port += 1\n cluster['ps'] = all_ps\n\n all_workers = []\n for _ in range(num_workers):\n all_workers.append(\"{}:{}\".format(host, port))\n port += 1\n cluster['worker'] = all_workers\n return cluster", "def test_read_cluster_resource_quota(self):\n pass", "def test_delete_cluster_resource_quota(self):\n pass", "def test_replace_cluster_resource_quota_status(self):\n pass", "def cluster_myid(self, target_node: \"TargetNodesT\") -> ResponseT:\n return self.execute_command(\"CLUSTER MYID\", target_nodes=target_node)", "def test_patch_cluster_policy(self):\n pass", "def cluster_amazon_video_game_again() -> Dict:\n return dict(model=None, score=None, clusters=None)", "def cluster(self):\n center_index = np.random.choice(range(100), self.K, replace=False)\n self.centers = np.array([self.X[i] for i in center_index])\n self.cluster_sizes = np.zeros(self.K)\n member_of = np.zeros(100, dtype=int)\n min_dist = np.array([distance.euclidean(self.centers[0], point) for point in self.X])\n self.cluster_sizes[0] = 100\n flag = 
True\n while flag:\n flag = False\n for i, point in enumerate(self.X):\n for j, center in enumerate(self.centers):\n if member_of[i] != j:\n dist = distance.euclidean(point, center)\n if dist < min_dist[i]:\n flag = True\n current = member_of[i]\n self.cluster_sizes[current] -= 1\n self.cluster_sizes[j] += 1\n member_of[i] = j\n min_dist[i] = dist\n if np.count_nonzero(self.cluster_sizes) != self.K:\n return self.cluster()\n self.centers = np.zeros((self.K, 2), dtype='d')\n for i, point in enumerate(self.X):\n center = member_of[i]\n self.centers[center] += point\n for i, center in enumerate(self.centers):\n center /= self.cluster_sizes[i]", "def test_create_cluster_policy(self):\n pass", "def cluster_hdbscan(\n clusterable_embedding, min_cluster_size, viz_embedding_list\n):\n print(f\"min_cluster size: {min_cluster_size}\")\n clusterer = hdbscan.HDBSCAN(\n min_cluster_size=min_cluster_size, prediction_data=True\n ).fit(clusterable_embedding)\n labels = hdbscan.HDBSCAN(min_cluster_size=min_cluster_size,).fit_predict(\n clusterable_embedding\n )\n print(f\"found {len(np.unique(labels))} clusters\")\n clustered = labels >= 0\n print(f\"fraction clustered: {np.sum(clustered)/labels.shape[0]}\")\n for embedding in viz_embedding_list:\n plt.scatter(\n embedding[~clustered][:, 0],\n embedding[~clustered][:, 1],\n c=(0.5, 0.5, 0.5),\n s=10,\n alpha=0.5,\n )\n plt.scatter(\n embedding[clustered][:, 0],\n embedding[clustered][:, 1],\n c=labels[clustered],\n s=10,\n cmap=\"Spectral\",\n )\n plt.legend(labels)\n plt.show()\n\n return labels, clusterer", "def test_read_cluster_policy(self):\n pass", "def createInstance(ec2,ami,nb_nodes,placement,instance_type,key,sg,user_data=None):\n\n reservation = ec2.run_instances(ami,min_count=nb_nodes,max_count=nb_nodes,placement = placement,key_name=key,security_groups=[sg],instance_type=instance_type,user_data=user_data)\n instance = reservation.instances[0]\n return instance", "def run(ceph_cluster, **kw):\n log.info(run.__doc__)\n config = kw[\"config\"]\n cephadm = CephAdmin(cluster=ceph_cluster, **config)\n rados_object = RadosOrchestrator(node=cephadm)\n mon_obj = MonConfigMethods(rados_obj=rados_object)\n ceph_nodes = kw.get(\"ceph_nodes\")\n osd_list = []\n total_osd_app_mem = {}\n\n for node in ceph_nodes:\n if node.role == \"osd\":\n node_osds = rados_object.collect_osd_daemon_ids(node)\n osd_list = osd_list + node_osds\n\n target_configs = config[\"cache_trim_max_skip_pinned\"][\"configurations\"]\n max_skip_pinned_value = int(\n mon_obj.get_config(section=\"osd\", param=\"bluestore_cache_trim_max_skip_pinned\")\n )\n\n # Check the default value of the bluestore_cache_trim_max_skip_pinned value\n if max_skip_pinned_value != 1000:\n log.error(\n \"The default value of bluestore_cache_trim_max_skip_pinned not equal to 1000\"\n )\n raise Exception(\n \"The default value of bluestore_cache_trim_max_skip_pinned is not 1000\"\n )\n\n # Creating pools and starting the test\n for entry in target_configs.values():\n log.debug(\n f\"Creating {entry['pool_type']} pool on the cluster with name {entry['pool_name']}\"\n )\n if entry.get(\"pool_type\", \"replicated\") == \"erasure\":\n method_should_succeed(\n rados_object.create_erasure_pool, name=entry[\"pool_name\"], **entry\n )\n else:\n method_should_succeed(\n rados_object.create_pool,\n **entry,\n )\n\n if not rados_object.bench_write(**entry):\n log.error(\"Failed to write objects into the EC Pool\")\n return 1\n rados_object.bench_read(**entry)\n log.info(\"Finished writing data into the pool\")\n\n # 
performing scrub and deep-scrub\n rados_object.run_scrub()\n rados_object.run_deep_scrub()\n time.sleep(10)\n\n rados_object.change_heap_profiler_state(osd_list, \"start\")\n # Executing tests for 45 minutes\n time_execution = datetime.datetime.now() + datetime.timedelta(minutes=45)\n while datetime.datetime.now() < time_execution:\n # Get all OSDs heap dump\n heap_dump = rados_object.get_heap_dump(osd_list)\n # get the osd application used memory\n osd_app_mem = get_bytes_used_by_app(heap_dump)\n total_osd_app_mem = mergeDictionary(total_osd_app_mem, osd_app_mem)\n # wait for 10 seconds and collecting the memory\n time.sleep(10)\n for osd_id, mem_list in total_osd_app_mem.items():\n mem_growth = is_what_percent_mem(mem_list)\n if mem_growth > 80:\n log.error(\n f\"The osd.{osd_id} consuming more memory with the relative memory growth {mem_growth}\"\n )\n raise Exception(\"No warning generated by PG Autoscaler\")\n log.info(f\"The relative memory growth for the osd.{osd_id} is {mem_growth} \")\n\n rados_object.change_heap_profiler_state(osd_list, \"stop\")\n\n # check fo the crashes in the cluster\n crash_list = rados_object.do_crash_ls()\n if not crash_list:\n return 0\n else:\n return 1", "def show_cluster_status(self, *args, **kwargs):\r\n return execute(self._show_cluster_status, *args, **kwargs)", "def clustering(dataset, logger):\n all_instances = dataset\n meta_dataset = collections.defaultdict(list)\n for instance in all_instances:\n meta_dataset[instance['label']].append(instance['coordinate'])\n\n tasklist = map(\n lambda item, meta_dataset=meta_dataset, logger=logger: (\n item[0],\n clustering_by_label,\n (item[1], item[0], meta_dataset, logger)), meta_dataset.items())\n\n # pool = multiprocessing.pool.Pool(PROCESS_COUNT)\n # clusters = dict(pool.map(map_generate_tuple, tasklist))\n clusters = dict(map(map_generate_tuple, tasklist))\n # pool.close()\n # pool.join()\n\n return clusters", "def cluster_amazon_video_game() -> Dict:\n return dict(model=None, score=None, clusters=None)", "def cluster(self):\n return self._cluster", "def cluster(self):\n return self._cluster", "def find_clusters():\n clusters = ecs_client.list_clusters()['clusterArns']\n logging.debug(\"\")\n logging.debug(\"************************************************************\")\n logging.debug(\"Retrieved %i clusters\" % (len(clusters)))\n for cluster in clusters:\n ratio = SequenceMatcher(\n lambda item:\n item == \" \",\n \"arn:aws:ecs:us-east-1*cluster/default\",\n cluster\n ).ratio()\n if ratio < 0.82:\n cluster_short = cluster.split(\"/\")[1]\n if args.cluster and cluster_short != args.cluster:\n continue\n ecs_data[cluster_short] = {}\n logging.debug(\"Cluster: %s\" % (cluster))\n instance_arns = ecs_client.list_container_instances(\n cluster=cluster\n )['containerInstanceArns']\n instances = ecs_client.describe_container_instances(\n cluster=cluster,\n containerInstances=instance_arns\n )['containerInstances']\n logging.debug(\"Retrieved %i cluster instances\" % (len(instances)))\n for instance in instances:\n ecs_data[cluster_short][instance['ec2InstanceId']] = {\n 'instance_id': instance['ec2InstanceId'],\n 'cluster': cluster_short,\n 'containers': []\n }\n logging.debug(\"\\tLooking for tasks in (%s): %s %s\" % (instance_data[instance['ec2InstanceId']]['name'], instance_data[instance['ec2InstanceId']]['id'], instance['containerInstanceArn']))\n tasks = ecs_client.list_tasks(\n cluster=cluster,\n containerInstance=instance['containerInstanceArn'],\n )['taskArns']\n logging.debug(\"Retrieved %i 
cluster tasks\" % (len(tasks)))\n for task in tasks:\n containers = ecs_client.describe_tasks(\n cluster=cluster,\n tasks=[task]\n )['tasks']\n for container in containers:\n if args.action != \"list\":\n if container['taskDefinitionArn'].split(\"/\")[1].split(\":\")[0] == args.task:\n if args.action == \"ssh\":\n if args.random:\n hosts.append(instance['ec2InstanceId'])\n else:\n logging.debug(\"sshing to %s\" % (instance['ec2InstanceId']))\n print('*** Initiating Host Interactive Session\\n')\n interactive().connect(instance_data[instance['ec2InstanceId']]['private_ip'],'')\n sys.exit(0)\n if args.action == \"enter\":\n if args.random:\n logging.debug(\"Recording host %s for random selection\" % (instance['ec2InstanceId']))\n hosts.append(instance['ec2InstanceId'])\n else:\n logging.debug(\"connect to %s -> %s\" % (instance['ec2InstanceId'],container['taskDefinitionArn'].split(\"/\")[1].split(\":\")[0]))\n print '*** Initiating Container Interactive Session\\n'\n interactive().docker_enter(args.user, instance_data[instance['ec2InstanceId']]['private_ip'],args.task)\n sys.exit(0)\n if args.action == \"list\":\n logging.debug(\"%s matched arg(%s): %s\" % (container['taskDefinitionArn'].split(\"/\")[1].split(\":\")[0], args.action, instance['ec2InstanceId']))\n ecs_data[cluster_short][instance['ec2InstanceId']]['containers'].append(container['taskDefinitionArn'].split(\"/\")[1].split(\":\")[0])\n # logging.info(\"%s:%s\" % (container['taskDefinitionArn'].split(\"/\")[1].split(\":\")[0], args.task))\n return True", "def is_distributed(args: dict) -> bool:\n\n return args.local_rank != -1", "def is_distributed() -> NotImplementedError:\n raise NotImplementedError()", "def count_all_cluster_instances(cluster_name, predictive=False, exclude_node_label_keys=app_config[\"EXCLUDE_NODE_LABEL_KEYS\"]):\n\n # Get the K8s nodes on the cluster, while excluding nodes with certain label keys\n k8s_nodes = get_k8s_nodes(exclude_node_label_keys)\n\n count = 0\n asgs = get_all_asgs(cluster_name)\n for asg in asgs:\n instances = asg['Instances']\n if predictive:\n count += asg['DesiredCapacity']\n else:\n # Use the get_node_by_instance_id() function as it only returns the node if it is not excluded by K8s labels\n for instance in instances:\n instance_id = instance['InstanceId']\n try:\n get_node_by_instance_id(k8s_nodes, instance_id)\n count += 1\n except Exception:\n logger.info(\"Skipping instance {}\".format(instance_id))\n logger.info(\"{} asg instance count in cluster is: {}. 
K8s node count should match this number\".format(\"*** Predicted\" if predictive else \"Current\", count))\n return count", "def _GetShardedBatch() -> tf.types.experimental.distributed.PerReplica:\n per_host_batches: List[py_utils.NestedMap] = []\n # Note: `available_devices` omits the executor host; just those with TPUs.\n for host_device in py_utils.Flatten(\n cluster_factory.Current().available_devices.tolist()\n ):\n with tf.device(host_device):\n batch = self.task.input.GetPreprocessedInputBatch()\n\n # Remove bucket_keys; this relates to GenericInput pipelines.\n batch = batch.FilterKeyVal(lambda k, _: not k.endswith('bucket_keys'))\n\n # Process embedding ID features according to their specified types.\n batch = batch.TransformWithKey(\n tpu_embedding_layers_v2.TPU_EMBEDDING_MANAGER.ProcessInputFeature\n )\n\n per_host_batches.extend(Split(batch, replicas_per_host))\n\n return strategy.experimental_distribute_values_from_function(\n lambda ctx: per_host_batches[ctx.replica_id_in_sync_group]\n )", "def show_cluster(name: str) -> Cluster:\n environment = EnvironmentProvider().environment\n return environment.clusters[name]", "def _Delete(self):\n cmd = self.cmd_prefix + [\n 'redshift', 'delete-cluster-subnet-group',\n '--cluster-subnet-group-name', self.name\n ]\n vm_util.IssueCommand(cmd, raise_on_failure=False)", "def run_instance():\n ami_id = \"ami-04876f29fd3a5e8ba\" # AMI Id\n instance_type = \"t2.micro\" # Instance Type\n tag_specs = [\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': 'Name',\n 'Value': 'BoB10@ProductDev.sdk.8084'\n }\n ]\n }\n ]\n\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Run an instance\n\n instances = ec2_resource.create_instances(ImageId=ami_id, InstanceType=instance_type,\n MaxCount=1, MinCount=1, KeyName='BoB10@ProductDev.8084',\n TagSpecifications=tag_specs, \n SecurityGroupIds=['sg-06b757b4bb272d98f'],\n UserData=assemble_userdata().as_string())\n Instance_id = instances[0].id\n print('\\nInstance Id: ' + Instance_id)\n print('Image Id: ' + instances[0].image_id)\n print('Instance Type: ' + instances[0].instance_type)\n print('State: ' + instances[0].state['Name'])\n return Instance_id", "def cluster_enabled(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"cluster_enabled\")", "def instance_action():\n data = check_args(\n ('cloudProvider', 'apiKey', 'secretKey', 'instanceAction',\n 'instanceId')\n )\n job = jobs.instance_action.apply_async(args=(data,))\n return make_response(job_id=job.id)", "def test_update_hyperflex_cluster(self):\n pass", "def cluster_start(r):\n cluster_id = request_get(r, \"cluster_id\")\n if not cluster_id:\n logger.warning(\"No cluster_id is given\")\n return make_fail_response(\"No cluster_id is given\")\n if cluster_handler.start(cluster_id):\n return jsonify(response_ok), CODE_OK\n\n return make_fail_response(\"cluster start failed\")", "def configure_cluster(ctx, zone, db_instance_name):\n ctx.run(init_pg_servers_play_run(zone, db_instance_name), pty=True, echo=True)", "def find_cluster(self, id):\n raise NotImplementedError", "def simulated_cluster(n_stars=CLUSTER_DEFAULTS['stars'],\n dimensions=CLUSTER_DEFAULTS['dimensions']):\n\n nx, ny = dimensions\n\n # Create empty image\n image = np.zeros((ny, nx))\n\n # Generate random positions\n r = np.random.random(n_stars) * nx\n theta = np.random.uniform(0., 2. 
* np.pi, n_stars)\n\n # Generate random fluxes\n fluxes = np.random.random(n_stars) ** 2\n\n # Compute position\n x = nx / 2 + r * np.cos(theta)\n y = ny / 2 + r * np.sin(theta)\n\n # Add stars to image\n # ==> First for loop and if statement <==\n for idx in range(n_stars):\n if x[idx] >= 0 and x[idx] < nx and y[idx] >= 0 and y[idx] < ny:\n image[y[idx], x[idx]] += fluxes[idx]\n\n # Convolve with a gaussian\n image = gaussian_filter(image, sigma=1)\n\n # Add noise\n image += np.random.normal(1., 0.001, image.shape)\n\n return image", "def quick_instance(self, name, image, instance_type, env_tag='dev', zone_tag='starwatts', os_tag='debian', sg_id=None,\n private=True, extra_sg_ids=None, extra_tags=None, terminate_on_shutdown=False,\n debug=False):\n # Debug setting\n if debug:\n logging.basicConfig(level=logging.DEBUG)\n\n # Preliminary tests\n try:\n ami = self.get_image(image_id=image)\n except EC2ResponseError:\n logging.error(\"The image {} could not be found. Aborting.\".format(image))\n return\n print(\"Using AMI {} : {}\".format(image, ami.name))\n if len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'running'})) > 0 or \\\n len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'stopped'})) > 0:\n logging.error(\"An instance with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No instance has the same 'name' tag.\")\n if self.keypair_exists(name):\n logging.error(\"A keypair with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No keypair was found with the same name.\")\n if sg_id is None:\n if self.security_group_exists(name=name):\n logging.error(\"A security group with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No security group was found with the same name.\")\n\n # Tags generation\n logging.debug(\"Generating tags to apply.\")\n tags = dict(name=name, os=os_tag, env=env_tag, zone=zone_tag, privacy='true' if private else 'false')\n if extra_tags is not None:\n tags.update(extra_tags)\n print(\"Tags : {}\".format(tags))\n\n # Fetching needed security groups (bastion and zabbix)\n standard_sg = self.get_all_security_groups(groupnames=['standard'])\n if len(standard_sg) != 1:\n logging.error(\"Multiple or no security group was found for the 'bastion' search. 
Aborting.\")\n return\n standard_sg = standard_sg[0]\n logging.debug(\"The following security group was found for 'standard : {} {}\".format(standard_sg.id,\n standard_sg.description))\n\n # Security group creation\n if sg_id is None:\n sg = self.create_security_group(name, \"SG applied to {} VM\".format(name))\n sg_id = sg.id\n\n sg_ids = [sg_id, standard_sg.id, ]\n # Using the extra security groups if any\n if extra_sg_ids is not None:\n logging.debug(\"Extra security groups to add : {}\".format(extra_sg_ids))\n sg_ids.extend(extra_sg_ids)\n logging.debug(\"Security Groups : {}\".format(sg_ids))\n\n user_data = \"-----BEGIN OUTSCALE SECTION-----\\nprivate_only=true\\n-----END OUTSCALE SECTION-----\" if private else \"\"\n logging.debug(\"Creating keypair.\")\n kp = self.create_key_pair(key_name=name)\n fp = os.path.join(os.path.expanduser('~/.ssh'), '%s.pem' % kp.name)\n with open(fp, 'wb') as fd:\n fd.write(bytes(kp.material, \"UTF-8\"))\n logging.debug(\"Keypair written to ~/.ssh/{}.pem\".format(name))\n\n resa = self.run_instances(image_id=image, key_name=name, security_groups=sg_ids, instance_type=instance_type,\n user_data=user_data,\n instance_initiated_shutdown_behavior='terminate' if terminate_on_shutdown else 'stop')\n inst = resa.instances[0]\n logging.debug(\"Adding tags to the newly created machine.\")\n inst.add_tags(tags)\n return inst", "def on_public_cluster(self) -> bool:\n return not self.on_private_cluster" ]
[ "0.62730116", "0.5638975", "0.5505993", "0.5474679", "0.5416399", "0.540477", "0.53883356", "0.5373748", "0.5364157", "0.5346423", "0.533336", "0.5286241", "0.52805185", "0.52731586", "0.526392", "0.5225001", "0.52148056", "0.5214644", "0.52102435", "0.51972777", "0.51717037", "0.516918", "0.5163223", "0.5153653", "0.5148468", "0.51441205", "0.51432306", "0.5140414", "0.5117445", "0.5110743", "0.51106936", "0.51076955", "0.5100692", "0.50926167", "0.50922644", "0.50867045", "0.5076853", "0.5067921", "0.50656027", "0.5064113", "0.5062118", "0.5029349", "0.5028662", "0.5007443", "0.5002565", "0.49904776", "0.498782", "0.49856204", "0.49795344", "0.49658036", "0.49558887", "0.49533403", "0.4939127", "0.49367672", "0.4928445", "0.4922915", "0.49198923", "0.49128005", "0.48971844", "0.4887591", "0.48871034", "0.4885436", "0.48799372", "0.48791263", "0.48788887", "0.4871592", "0.48700768", "0.48665884", "0.48582208", "0.48555335", "0.48535475", "0.4847886", "0.4843657", "0.48428187", "0.48341644", "0.48295778", "0.48220143", "0.48204297", "0.48203462", "0.48197788", "0.48180035", "0.48175037", "0.480513", "0.480513", "0.48012695", "0.48006633", "0.4795675", "0.47945547", "0.47908092", "0.47903603", "0.47898173", "0.47844145", "0.47821182", "0.47812313", "0.47744507", "0.4768171", "0.47614446", "0.4760131", "0.47542343", "0.4750937", "0.47501296" ]
0.0
-1
This operation supports sharded cluster instances only.
async def describe_sharding_network_address_with_options_async( self, request: dds_20151201_models.DescribeShardingNetworkAddressRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.DescribeShardingNetworkAddressResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.node_id): query['NodeId'] = request.node_id if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='DescribeShardingNetworkAddress', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.DescribeShardingNetworkAddressResponse(), await self.call_api_async(params, req, runtime) )
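For context, a minimal usage sketch of the async method above. This is not part of the dataset row: the package and module paths (`alibabacloud_dds20151201`, `alibabacloud_tea_openapi`, `alibabacloud_tea_util`), the client class name, the endpoint, and the DB instance ID are assumptions based on the typical Alibaba Cloud Python SDK layout; only the method name and the `dbinstance_id` request field come from the code shown here.

```python
# Hypothetical usage sketch for DescribeShardingNetworkAddress (sharded cluster instances only).
# Assumed: generated-SDK package layout, endpoint, and placeholder instance ID.
import asyncio
import os

from alibabacloud_dds20151201.client import Client
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util import models as util_models


async def main() -> None:
    # Credentials are read from environment variables to avoid hard-coding secrets.
    config = open_api_models.Config(
        access_key_id=os.environ['ALIBABA_CLOUD_ACCESS_KEY_ID'],
        access_key_secret=os.environ['ALIBABA_CLOUD_ACCESS_KEY_SECRET'],
        endpoint='mongodb.aliyuncs.com',  # assumed endpoint
    )
    client = Client(config)

    request = dds_20151201_models.DescribeShardingNetworkAddressRequest(
        dbinstance_id='dds-bpxxxxxxxxxxxxxx',  # placeholder sharded cluster instance ID
    )
    runtime = util_models.RuntimeOptions()

    # Call the async variant shown above and print the response body.
    response = await client.describe_sharding_network_address_with_options_async(request, runtime)
    print(response.body)


if __name__ == '__main__':
    asyncio.run(main())
```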
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
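The objective block above marks this row for triplet-style training over (query, document, negatives). A dependency-free sketch of expanding such rows into triplets follows; the field names are taken from the objective block, while the JSONL file name is a placeholder, not a file referenced by this dump.

```python
# Hypothetical helper: expand a dataset row into (query, positive, negative) triplets,
# following the "triplet": [["query", "document", "negatives"]] objective above.
# Rows are assumed to be JSON objects with "query", "document", and "negatives" fields.
import json
from typing import Iterator, Tuple


def iter_triplets(jsonl_path: str) -> Iterator[Tuple[str, str, str]]:
    with open(jsonl_path, 'r', encoding='utf-8') as fh:
        for line in fh:
            row = json.loads(line)
            query, positive = row['query'], row['document']
            for negative in row['negatives']:
                yield query, positive, negative


# Example: print a truncated view of the first triplet (file name assumed).
if __name__ == '__main__':
    for q, pos, neg in iter_triplets('train.jsonl'):
        print(q[:60], '|', pos[:60], '|', neg[:60])
        break
```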
[ "def cluster(self):\n assert False", "def cluster_myshardid(self, target_nodes=None):\n return self.execute_command(\"CLUSTER MYSHARDID\", target_nodes=target_nodes)", "def delete_cluster(self):", "def run_cluster(autoscaling: bool = False, **options) -> None:\n if autoscaling:\n thread = AutoScalingCluster.new(**options)\n else:\n thread = RemoteCluster.new(**options)\n try:\n thread.join()\n except Exception:\n thread.stop()\n raise", "def create_redshift_cluster(config, redshift_role):\n redshift = create_boto3_client(config, 'redshift')\n cluster_identifier = config.get('CLUSTER', 'CLUSTER_IDENTIFIER')\n print(\"Creating redshift cluster: %s\" % cluster_identifier)\n try:\n cc_response = redshift.create_cluster(\n MasterUsername=config.get('CLUSTER', 'DB_USER'),\n MasterUserPassword=config.get('CLUSTER', 'DB_PASSWORD'),\n ClusterIdentifier=cluster_identifier,\n NodeType=config.get('CLUSTER', 'NODE_TYPE'),\n NumberOfNodes=int(config.get('CLUSTER', 'NODE_COUNT')),\n Port=int(config.get('CLUSTER', 'DB_PORT')),\n IamRoles=[\n redshift_role['Role']['Arn']\n ],\n ClusterSubnetGroupName=config.get('CLUSTER', 'SUBNET_GROUP'),\n ClusterSecurityGroups=[config.get('CLUSTER', 'SECURITY_GROUP_ID')]\n )\n print('Creating Cluster:', cc_response)\n except ClientError as e:\n if e.response['Error']['Code'] == 'ClusterAlreadyExists':\n print(\"Cluster %s already exists\" % cluster_identifier)\n return\n else:\n print(\"Unexpected error wile creating cluster: %s\" % e)\n\n print('Sleep 5 seconds')\n time.sleep(5)\n while True:\n print('Fetching status of cluster..')\n try:\n cluster_status = get_cluster_status(redshift, cluster_identifier)\n if cluster_status['Clusters'][0]['ClusterStatus'] == 'available':\n break\n print('Cluster Status:', cluster_status)\n except ClientError as e:\n print(\"Unexpected error wile getting cluster status: %s\" % e)\n raise e\n print('Sleep 10 seconds')\n time.sleep(10)\n print('Cluster is created and available.')", "def dvs_instances_batch_mix_sg(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n self.show_step(2)\n net_1 = os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network is created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(3)\n self.show_step(4)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n self.icmp[\"security_group_rule\"][\"security_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"remote_group_id\"] = sg1.id\n 
self.icmp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n self.icmp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n\n for sg in [sg1, sg2]:\n self.tcp[\"security_group_rule\"][\"security_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"remote_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n self.tcp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n\n # Add rules for ssh and ping\n os_conn.goodbye_security()\n _s_groups = os_conn.neutron.list_security_groups()['security_groups']\n _srv_tenant_id = os_conn.get_tenant(SERVTEST_TENANT).id\n default_sg = [sg for sg in _s_groups\n if sg['tenant_id'] == _srv_tenant_id and\n sg['name'] == 'default'][0]\n\n for step in (5, 9):\n self.show_step(step) # step 5, 9\n step += 1\n self.show_step(step) # step 6, 10\n step += 1\n\n openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=vm_count,\n security_groups=[sg1.name, sg2.name, default_sg['name']])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(step) # step 7, 11\n step += 1\n\n instances = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, instances)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair,\n timeout=60 * 5,\n interval=10)\n\n self.show_step(step) # step 8, 12\n step += 1\n\n for instance in instances:\n os_conn.nova.servers.delete(instance)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(instance)", "def run_clean_cluster(red_df, remove_df=None):\n cluster_df = cluster.get_cluster_output_df(red_df, False, quantile=0.8)\n return run_split_cluster(cluster_df, remove_df)", "def _create_cluster(self, server_instance):\n return Cluster([server_instance])", "def launch_cluster(params):\n logging.info('Launching cluster of size: {} and type: {}'.format(params.cluster_size, params.instance_type))\n subprocess.check_call(['cgcloud',\n 'create-cluster',\n '--leader-instance-type', 'm3.medium',\n '--instance-type', params.instance_type,\n '--share', params.shared_dir,\n '--num-workers', str(params.cluster_size),\n '-c', params.cluster_name,\n '--spot-bid', str(params.spot_price),\n '--leader-on-demand',\n '--ssh-opts',\n '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no',\n 'toil'])", "def test_patch_cluster_role(self):\n pass", "def recluster(cluster, min_size, guard, func):\r\n if cluster.get_length() == 0:\r\n return\r\n if cluster.get_length() <= min_size:\r\n return cluster\r\n sim = func(cluster.get_tweets())\r\n if sim < guard:\r\n kmeans = TweetKMeans(2)\r\n kmeans.set_data(cluster.get_tweets())\r\n return kmeans.start_algorithm()\r\n return cluster", "def launch_cluster(\n descs: List[MachineDesc],\n *,\n nonce: Optional[str] = None,\n key_name: Optional[str] = None,\n security_group_name: str = DEFAULT_SECURITY_GROUP_NAME,\n instance_profile: Optional[str] = DEFAULT_INSTANCE_PROFILE_NAME,\n extra_tags: Dict[str, str] = {},\n delete_after: datetime.datetime,\n git_rev: str = \"HEAD\",\n extra_env: Dict[str, str] = {},\n) -> List[Instance]:\n\n if not nonce:\n nonce = util.nonce(8)\n\n instances = [\n launch(\n key_name=key_name,\n instance_type=d.instance_type,\n ami=d.ami,\n 
ami_user=d.ami_user,\n tags={**d.tags, **extra_tags},\n display_name=f\"{nonce}-{d.name}\",\n size_gb=d.size_gb,\n security_group_name=security_group_name,\n instance_profile=instance_profile,\n nonce=nonce,\n delete_after=delete_after,\n )\n for d in descs\n ]\n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(\n asyncio.gather(\n *(\n setup(i, git_rev if d.checkout else \"HEAD\")\n for (i, d) in zip(instances, descs)\n )\n )\n )\n\n hosts_str = \"\".join(\n (f\"{i.private_ip_address}\\t{d.name}\\n\" for (i, d) in zip(instances, descs))\n )\n for i in instances:\n mssh(i, \"sudo tee -a /etc/hosts\", input=hosts_str.encode())\n\n env = \" \".join(f\"{k}={shlex.quote(v)}\" for k, v in extra_env.items())\n for (i, d) in zip(instances, descs):\n if d.launch_script:\n mssh(\n i,\n f\"(cd materialize && {env} nohup bash -c {shlex.quote(d.launch_script)}) &> mzscratch.log &\",\n )\n\n return instances", "def resize_cluster(ctx, project_name, cluster_name, instance_size_name):\n project = ctx.obj.groups.byName[project_name].get().data\n\n new_cluster_config = {\n 'clusterType': 'REPLICASET',\n 'providerSettings': {\n 'providerName': 'AWS',\n 'regionName': 'US_WEST_1',\n 'instanceSizeName': instance_size_name}}\n\n cluster = ctx.obj.groups[project.id].clusters[cluster_name].patch(\n **new_cluster_config)\n pprint(cluster.data)", "def test_replace_cluster_role(self):\n pass", "def running_cluster(request):\n cluster_name = 'running-cluster-' + random_string()\n launch_cluster(\n cluster_name=cluster_name,\n instance_type=request.param.instance_type,\n spark_version=request.param.spark_version,\n spark_git_commit=request.param.spark_git_commit)\n\n if request.param.restarted:\n stop_cluster(cluster_name)\n start_cluster(cluster_name)\n\n def destroy():\n p = subprocess.run([\n 'flintrock', 'destroy', cluster_name, '--assume-yes'])\n assert p.returncode == 0\n request.addfinalizer(destroy)\n\n return cluster_name", "def create_cluster(ctx, project_name, cluster_name, instance_size_name):\n project = ctx.obj.groups.byName[project_name].get().data\n\n cluster_config = {\n 'name': cluster_name,\n 'clusterType': 'REPLICASET',\n 'providerSettings': {\n 'providerName': 'AWS',\n 'regionName': 'US_WEST_1',\n 'instanceSizeName': instance_size_name}}\n\n cluster = ctx.obj.groups[project.id].clusters.post(**cluster_config)\n pprint(cluster.data)", "def _get_cluster_list(self):\n return self.__cluster_list", "def test_create_cluster_role(self):\n pass", "def cluster_shards(self, target_nodes=None):\n return self.execute_command(\"CLUSTER SHARDS\", target_nodes=target_nodes)", "def dvs_instances_one_group(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(2)\n self.show_step(3)\n openstack.create_instances(os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n vm_count=vm_count,\n security_groups=[security_group.name])\n 
openstack.verify_instance_state(os_conn)\n\n self.show_step(4)\n srv_list = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, srv_list)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair)\n\n self.show_step(5)\n for srv in srv_list:\n os_conn.nova.servers.delete(srv)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(srv)", "def atlas_clusters():\n pass", "def resource_type(self):\n return 'cluster'", "def create_cache_cluster(stack, name, cache_type, vpc, cidrs, subnet_ids,\n instance_type, num_cache_clusters):\n ports = {'redis': 6379, 'memcached': 11211}\n ingress = []\n\n for idx, cidr in enumerate(cidrs):\n ingress.append(\n SecurityGroupRule(\n '{0}{1}{2}'.format(name.replace('-', ''), cache_type, idx),\n CidrIp=cidr,\n FromPort=ports[cache_type],\n ToPort=ports[cache_type],\n IpProtocol='tcp',\n ))\n\n secgroup = stack.stack.add_resource(\n SecurityGroup(\n '{0}{1}SecurityGroup'.format(name.replace('-', ''), cache_type),\n GroupDescription='{0} {1} Security Group'.format(name, cache_type),\n SecurityGroupIngress=ingress,\n SecurityGroupEgress=[\n SecurityGroupRule(\n '{0}egress'.format(name.replace('-', '')),\n CidrIp='0.0.0.0/0',\n IpProtocol='-1')\n ],\n VpcId=vpc,\n ))\n\n subnet_group = stack.stack.add_resource(\n elasticache.SubnetGroup(\n '{0}{1}cache'.format(name.replace('-', ''), cache_type),\n Description='{0}{1} cache'.format(name, cache_type),\n SubnetIds=subnet_ids,\n ))\n\n if num_cache_clusters > 1:\n stack.stack.add_resource(\n elasticache.ReplicationGroup(\n '{0}CacheCluster'.format(name.replace('-', '')),\n ReplicationGroupId='{0}'.format(name),\n ReplicationGroupDescription='{0}cluster'.format(name),\n Engine='{0}'.format(cache_type),\n EngineVersion='3.2.6',\n CacheNodeType=instance_type,\n NumCacheClusters=num_cache_clusters,\n CacheSubnetGroupName=Ref(subnet_group),\n SecurityGroupIds=[Ref(secgroup)],\n AtRestEncryptionEnabled=True))\n else:\n stack.stack.add_resource(\n elasticache.CacheCluster(\n '{0}CacheCluster'.format(name.replace('-', '')),\n ClusterName='{0}'.format(name),\n Engine='{0}'.format(cache_type),\n EngineVersion='3.2.10',\n CacheNodeType=instance_type,\n NumCacheNodes=num_cache_clusters,\n VpcSecurityGroupIds=[Ref(secgroup)],\n CacheSubnetGroupName=Ref(subnet_group)))", "def cluster_by_partitioning(active_sites):\n cls, sc = k_means(active_sites)\n\n return cls", "def test_replace_cluster_resource_quota(self):\n pass", "def launch_cluster(**overrides) -> dict:\n if os.path.isfile(META_FILE):\n raise FileExistsError(\"Cluster already exists!\")\n\n config = DEFAULT_CONFIG.copy()\n config.update(**overrides)\n\n sg = make_sg()\n config[\"Instances\"].update(AdditionalMasterSecurityGroups=[sg.id])\n emr = get_emr_client()\n\n response = emr.run_job_flow(**config)\n cluster_id = response[\"JobFlowId\"]\n master_addr = wait_init(cluster_id)\n\n meta = {\n \"MasterNodeAddr\": master_addr,\n \"ClusterId\": cluster_id,\n \"SGId\": sg.id\n }\n with open(META_FILE, \"w\") as f:\n json.dump(meta, f)\n\n print(\"INFO: Cluster Launched!\")\n return meta", "def startCluster():\n # attempt to create a cluster\n print(\"Creating a Redshift cluster...\")\n try:\n redshift.create_cluster(\n\n # hardware parameters\n ClusterType=DWH_CLUSTER_TYPE,\n NodeType=DWH_NODE_TYPE,\n NumberOfNodes=int(DWH_NUM_NODES),\n\n # database access configuration\n DBName=DWH_DB,\n 
ClusterIdentifier=DWH_CLUSTER_IDENTIFIER,\n MasterUsername=DWH_DB_USER,\n MasterUserPassword=DWH_DB_PASSWORD,\n\n # accesses\n IamRoles=[iam.get_role(RoleName=DWH_IAM_ROLE_NAME)[\"Role\"][\"Arn\"]]\n )\n except Exception as e:\n print(e)\n return\n\n # wait for cluster to spin up\n print(\"Waiting for cluster to be available...\")\n while redshift.describe_clusters(\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER\n )[\"Clusters\"][0][\"ClusterStatus\"] != \"available\":\n time.sleep(30)\n print(\"\\tChecking status again...\")", "def remove_cluster(config, nova, neutron, cinder, conn):\n\n cluster_info = OSClusterInfo(nova, neutron, cinder, config, conn)\n masters = cluster_info.get_instances(\"node\")\n workers = cluster_info.get_instances(\"master\")\n\n tasks = [host.delete(neutron) for host in masters if host]\n tasks += [host.delete(neutron) for host in workers if host]\n if tasks:\n LOGGER.debug(\"Deleting Instances ...\")\n loop = asyncio.get_event_loop()\n loop.run_until_complete(asyncio.wait(tasks))\n loop.close()\n\n LoadBalancer(config, conn).delete()\n\n sg_name = '%s-sec-group' % config['cluster-name']\n secg = conn.list_security_groups({\"name\": sg_name})\n if secg:\n LOGGER.debug(\"Deleting SecurityGroup %s ...\", sg_name)\n for sg in secg:\n for rule in sg.security_group_rules:\n conn.delete_security_group_rule(rule['id'])\n\n for port in conn.list_ports():\n if sg.id in port.security_groups:\n conn.delete_port(port.id)\n conn.delete_security_group(sg_name)\n\n # This needs to be replaced with OpenStackAPI in the future\n for vol in cinder.volumes.list():\n try:\n if config['cluster-name'] in vol.name and vol.status != 'in-use':\n try:\n vol.delete()\n except (BadRequest, NotFound):\n pass\n\n except TypeError:\n continue\n\n # delete the cluster key pair\n conn.delete_keypair(config['cluster-name'])", "def cluster_timeseries(X, roi_mask_nparray, n_clusters, similarity_metric, affinity_threshold, cluster_method = 'ward'):\n \n import sklearn as sk\n from sklearn import cluster, datasets, preprocessing\n import scipy as sp\n import time \n from sklearn.cluster import FeatureAgglomeration\n from sklearn.feature_extraction import image\n\n \n print('Beginning Calculating pairwise distances between voxels')\n \n X = np.array(X)\n X_dist = sp.spatial.distance.pdist(X.T, metric = similarity_metric)\n \n temp=X_dist\n temp[np.isnan(temp)]=0\n tempmax=temp.max()\n \n X_dist = sp.spatial.distance.squareform(X_dist)\n X_dist[np.isnan(X_dist)]=tempmax\n #import pdb;pdb.set_trace()\n sim_matrix=1-sk.preprocessing.normalize(X_dist, norm='max')\n sim_matrix[sim_matrix<affinity_threshold]=0\n #import pdb;pdb.set_trace()\n if cluster_method == 'ward':\n # ## BEGIN WARD CLUSTERING CODE \n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n if roi_mask_nparray!='empty':\n #import pdb; pdb.set_trace()\n shape = roi_mask_nparray.shape\n connectivity = image.grid_to_graph(n_x=shape[0], n_y=shape[1],\n n_z=shape[2], mask=roi_mask_nparray)\n \n ward = FeatureAgglomeration(n_clusters=n_clusters, connectivity=connectivity,\n linkage='ward')\n ward.fit(sim_matrix)\n y_pred = ward.labels_.astype(np.int)\n else:\n print(\"Calculating Hierarchical Clustering\")\n ward = FeatureAgglomeration(n_clusters=n_clusters, affinity='euclidean', linkage='ward') \n ward.fit(sim_matrix)\n y_pred = ward.labels_.astype(np.int)\n \n# # END WARD CLUSTERING CODE \n else:\n \n print(\"spectral\")\n print(\"spectral\")\n 
print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n #cluster_method== 'spectral':\n #Spectral method\n spectral = cluster.SpectralClustering(n_clusters, eigen_solver='arpack', random_state = 5, affinity=\"precomputed\", assign_labels='discretize') \n spectral.fit(sim_matrix)\n y_pred = spectral.labels_.astype(np.int) \n\n# \n # BEGIN SPECTRAL CLUSTERING CODE \n \n # END SPECTRAL CLUSTERING CODE \n\n\n\n return y_pred", "def create_cluster(df,validate, test, X, k, name):\n \n scaler = StandardScaler(copy=True).fit(df[X])\n X_scaled = pd.DataFrame(scaler.transform(df[X]), columns=df[X].columns.values).set_index([df[X].index.values])\n kmeans = KMeans(n_clusters = k, random_state = 42)\n kmeans.fit(X_scaled)\n kmeans.predict(X_scaled)\n df[name] = kmeans.predict(X_scaled)\n df[name] = 'cluster_' + df[name].astype(str)\n \n v_scaled = pd.DataFrame(scaler.transform(validate[X]), columns=validate[X].columns.values).set_index([validate[X].index.values])\n validate[name] = kmeans.predict(v_scaled)\n validate[name] = 'cluster_' + validate[name].astype(str)\n \n t_scaled = pd.DataFrame(scaler.transform(test[X]), columns=test[X].columns.values).set_index([test[X].index.values])\n test[name] = kmeans.predict(t_scaled)\n test[name] = 'cluster_' + test[name].astype(str)\n \n centroids = pd.DataFrame(scaler.inverse_transform(kmeans.cluster_centers_), columns=X_scaled.columns)\n return df, X_scaled, scaler, kmeans, centroids", "def test_patch_hyperflex_cluster(self):\n pass", "def test_read_cluster_role(self):\n pass", "def main():\n rs = redshift(config_file=CONFIG_FILENAME)\n \n # check if cluster already available\n try:\n clust_avail = check_available(rs)\n except rs_client.exceptions.ClusterNotFoundFault:\n clust_avail = False\n\n # if cluster not available, create it\n if not clust_avail:\n create_cluster(rs) \n \n print(f'Cluster is available. 
Cluster information: \\n{rs.get_cluster_info()}')", "def _load_cluster(self):", "def _setup_cluster(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "def cross_cluster_timeseries(data1, data2, roi_mask_nparray, n_clusters, similarity_metric, affinity_threshold, cluster_method = 'ward'):\n \n \n \n import scipy as sp\n import time\n import sklearn as sk\n from sklearn import cluster, datasets, preprocessing\n from sklearn.cluster import FeatureAgglomeration\n from sklearn.feature_extraction import image\n\n \n \n print(\"Calculating Cross-clustering\")\n print(\"Calculating pairwise distances between areas\")\n \n dist_btwn_data_1_2 = np.array(sp.spatial.distance.cdist(data1.T, data2.T, metric = similarity_metric))\n sim_btwn_data_1_2=1-dist_btwn_data_1_2\n sim_btwn_data_1_2[np.isnan(sim_btwn_data_1_2)]=0\n sim_btwn_data_1_2[sim_btwn_data_1_2<affinity_threshold]=0\n\n print(\"Calculating pairwise distances between voxels in ROI 1 \")\n dist_of_1 = sp.spatial.distance.pdist(sim_btwn_data_1_2, metric = 'euclidean')\n dist_matrix = sp.spatial.distance.squareform(dist_of_1)\n sim_matrix=1-sk.preprocessing.normalize(dist_matrix, norm='max')\n sim_matrix[sim_matrix<affinity_threshold]=0\n\n\n if cluster_method == 'ward':\n # ## BEGIN WARD CLUSTERING CODE \n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n if roi_mask_nparray!='empty':\n #import pdb; pdb.set_trace()\n shape = roi_mask_nparray.shape\n connectivity = image.grid_to_graph(n_x=shape[0], n_y=shape[1],\n n_z=shape[2], mask=roi_mask_nparray)\n \n ward = FeatureAgglomeration(n_clusters=n_clusters, connectivity=connectivity,\n linkage='ward')\n ward.fit(sim_matrix)\n y_pred = ward.labels_.astype(np.int)\n else:\n print(\"Calculating Hierarchical Cross-clustering\")\n ward = FeatureAgglomeration(n_clusters=n_clusters, affinity='euclidean', linkage='ward') \n ward.fit(sim_matrix)\n y_pred = ward.labels_.astype(np.int)\n \n # # END WARD CLUSTERING CODE \n else:\n \n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n #cluster_method== 'spectral':\n #Spectral method\n spectral = cluster.SpectralClustering(n_clusters, eigen_solver='arpack', random_state = 5, affinity=\"precomputed\", assign_labels='discretize') \n spectral.fit(sim_matrix)\n y_pred = spectral.labels_.astype(np.int) \n\n# \n # BEGIN SPECTRAL CLUSTERING CODE \n \n # END SPECTRAL CLUSTERING CODE \n\n\n\n# sim_matrix[np.isnan((sim_matrix))]=0\n# sim_matrix[sim_matrix<0]=0\n# sim_matrix[sim_matrix>1]=1\n\n ## BEGIN WARD CLUSTERING CODE \n# print(\"Calculating Hierarchical Cross-clustering\")\n# ward = FeatureAgglomeration(n_clusters=n_clusters, affinity='euclidean', linkage='ward') \n# ward.fit(sim_matrix)\n# y_pred = ward.labels_.astype(np.int)\n# \n ## END WARD CLUSTERING CODE \n \n# # BEGIN SPECTRAL CLUSTERING CODE \n# spectral = cluster.SpectralClustering(n_clusters, eigen_solver='arpack', random_state = 5, affinity=\"precomputed\", assign_labels='discretize') \n# spectral.fit(sim_matrix)\n# y_pred = spectral.labels_.astype(np.int)\n# # END SPECTRAL CLUSTERING CODE \n \n return y_pred", "def test_create_cluster_resource_quota(self):\n pass", "def index(self):\n\n if self.cluster:\n self.cluster.index()\n else:\n super().index()", "def testSharded(self):\n np.random.seed(0)\n num_classes = 5\n batch_size = 3\n\n for 
num_true in range(1, 5):\n labels = np.random.randint(\n low=0, high=num_classes, size=batch_size * num_true)\n (weights, biases, hidden_acts, sampled_vals, exp_logits,\n exp_labels) = self._GenerateTestData(\n num_classes=num_classes,\n dim=10,\n batch_size=batch_size,\n num_true=num_true,\n labels=labels,\n sampled=[1, 0, 2, 3],\n subtract_log_q=False)\n weight_shards, bias_shards = self._ShardTestEmbeddings(\n weights, biases, num_shards=3)\n logits_tensor, labels_tensor = _compute_sampled_logits(\n weights=[constant_op.constant(shard) for shard in weight_shards],\n biases=[constant_op.constant(shard) for shard in bias_shards],\n labels=constant_op.constant(\n labels, dtype=dtypes.int64, shape=(batch_size, num_true)),\n inputs=constant_op.constant(hidden_acts),\n num_sampled=4,\n num_classes=num_classes,\n num_true=num_true,\n sampled_values=sampled_vals,\n subtract_log_q=False,\n remove_accidental_hits=False,\n partition_strategy=\"div\",\n name=\"sampled_logits_sharded_num_true_%d\" % num_true)\n got_logits, got_labels = self.evaluate([logits_tensor, labels_tensor])\n self.assertAllClose(exp_logits, got_logits, self._eps)\n self.assertAllClose(exp_labels, got_labels, self._eps)", "def test_list_cluster_role(self):\n pass", "def create_cluster(rs):\n\n rs.create_cluster(verbose=False)\n print('Creating cluster. Will check every 30 seconds for completed creation.')\n cluster_built = False\n while not cluster_built:\n print('Sleeping 30 seconds.')\n time.sleep(30)\n cluster_built = check_available(rs)", "def _cluster(self):\n # , distance_function=spearman_squared_distance, max_iter=1000, tol=0.0001):\n if self.cluster_method is None:\n clusters = KMedoids(\n self.k,\n self.batchsize,\n dist_func=self.distance_function,\n max_iter=self.max_iter,\n tol=self.tol,\n init_medoids=self.init_medoids,\n swap_medoids=self.swap_medoids,\n )\n clusters.fit(self.clustering_attributions, verbose=self.verbose)\n\n self.subpopulations = clusters.members\n self.subpopulation_sizes = GAM.get_subpopulation_sizes(clusters.members)\n self.explanations = self._get_explanations(clusters.centers)\n # Making explanations return numerical values instead of dask arrays\n if isinstance(self.explanations[0][0][1], da.Array):\n explanations = []\n for explanation in self.explanations:\n explanations.append([(x[0], x[1].compute()) for x in explanation])\n self.explanations = explanations\n else:\n self.cluster_method(self)", "def test_crud_cluster(self):\n # create the object\n response = self._create_cluster()\n self.assertEqual(response.status_code, status.HTTP_201_CREATED,\n response.content)\n\n # list the object\n cluster_id = self._list_cluster()\n # Assert that the originally created cluster id is the same as the one\n # returned by list\n self.assertEquals(response.data['id'], cluster_id)\n self.assertEquals(response.data['default_vm_type'], 'm5.24xlarge')\n self.assertEquals(response.data['default_zone']['name'], 'us-east-1b')\n\n # check details\n cluster_id = self._check_cluster_exists(cluster_id)\n\n # update cluster\n response = self._update_cluster(cluster_id)\n self.assertEquals(response['name'], 'new_name')\n\n # delete the object\n response = self._delete_cluster(cluster_id)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data)\n\n # check it no longer exists\n self._check_no_clusters_exist()", "def terminateCluster():\n try:\n # delete cluster\n redshift.delete_cluster(\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER,\n SkipFinalClusterSnapshot=True\n )\n\n # clear up 
role\n iam.detach_role_policy(\n RoleName=DWH_IAM_ROLE_NAME,\n PolicyArn=\"arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess\"\n )\n iam.delete_role(RoleName=DWH_IAM_ROLE_NAME)\n except Exception as e:\n print(e)", "def enableRemoteAccess():\n myClusterProps = redshift.describe_clusters(\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER\n )['Clusters'][0]\n\n # attempt to allow incoming TCP connections to the cluster\n print(\"Enabling incoming TCP connections...\")\n try:\n vpc = ec2.Vpc(id=myClusterProps[\"VpcId\"])\n for sg in vpc.security_groups.all():\n if sg.group_name == \"default\":\n defaultSg = sg\n break\n\n defaultSg.authorize_ingress(\n GroupName=defaultSg.group_name,\n CidrIp=CIDR_IP_ALLOWED,\n IpProtocol='TCP',\n FromPort=int(DWH_PORT),\n ToPort=int(DWH_PORT)\n )\n except Exception as e:\n print(e)\n return\n\n print(\"\\nCluster is ready for access!\")\n endpoint = myClusterProps[\"Endpoint\"][\"Address\"]\n print(f\"Endpoint: {endpoint}\")", "def modifyMastersWithMultipleInstances(self):\n # Nothing to do\n pass", "def test_get_hyperflex_cluster_list(self):\n pass", "def create_redshift_cluster(redshift_client, role_arn):\n # Create the cluster if it doesn't exist.\n try:\n response = redshift_client.create_cluster(\n ClusterType=CLUSTER_TYPE,\n NodeType=NODE_TYPE,\n NumberOfNodes=NUM_NODES,\n DBName=DBNAME,\n ClusterIdentifier=IDENTIFIER,\n MasterUsername=USER,\n MasterUserPassword=PASSWORD,\n IamRoles=[role_arn]\n )\n except Exception as e:\n print(e)", "def cluster_destroy(extra_args=None):\n cmd = [\"pcs\", \"cluster\", \"destroy\"]\n\n if isinstance(extra_args, (list, tuple)):\n cmd += extra_args\n\n log.debug(\"Running cluster destroy: %s\", cmd)\n\n return __salt__[\"cmd.run_all\"](cmd, output_loglevel=\"trace\", python_shell=False)", "def test_replace_cluster_policy(self):\n pass", "def test_eks_worker_node_managed_by_eks(self) -> None:\n response = self.ec2.describe_instances(Filters=[\n {\n 'Name': 'tag:Name',\n 'Values': ['eks-prod']\n }\n ])\n worker_instances = response.get('Reservations')[0].get('Instances')\n self.assertEqual(1, len(worker_instances))", "def launch_example_cluster_cmd(*args, **kwargs):\n return launch_example_cluster(*args, **kwargs)", "def test_delete_cluster_role(self):\n pass", "def cluster_nodes(self) -> ResponseT:\n return self.execute_command(\"CLUSTER NODES\")", "def main():\n parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('--config', required=True, help='Configuration file for run. Must be in shared_dir')\n parser.add_argument('-c', '--cluster_size', required=True, help='Number of workers desired in the cluster.')\n parser.add_argument('-s', '--sample_size', required=True, type=float, help='Size of the sample deisred in TB.')\n parser.add_argument('-t', '--instance_type', default='c3.8xlarge', help='e.g. 
m4.large or c3.8xlarge.')\n parser.add_argument('-n', '--cluster_name', required=True, help='Name of cluster.')\n parser.add_argument('--namespace', default='jtvivian', help='CGCloud NameSpace')\n parser.add_argument('--spot_price', default=0.60, help='Change spot price of instances')\n parser.add_argument('-b', '--bucket', default='tcga-data-cgl-recompute', help='Bucket where data is.')\n parser.add_argument('-d', '--shared_dir', required=True,\n help='Full path to directory with: pipeline script, launch script, config, and master key.')\n params = parser.parse_args()\n\n # Run sequence\n start = time.time()\n # Get number of samples from config\n with open(params.config, 'r') as f:\n num_samples = len(f.readlines())\n # Launch cluster and pipeline\n uuid = fix_launch(params)\n launch_cluster(params)\n ids = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')\n launch_pipeline(params)\n # Blocks until all workers are idle\n stop = time.time()\n # Collect metrics from cluster\n collect_metrics(ids, list_of_metrics, start, stop, uuid=uuid)\n # Apply \"Insta-kill\" alarm to every worker\n map(apply_alarm_to_instance, ids)\n # Kill leader\n logging.info('Killing Leader')\n leader_id = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-leader')[0]\n apply_alarm_to_instance(leader_id, threshold=5)\n # Generate Run Report\n avail_zone = get_avail_zone(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')[0]\n total_cost, avg_hourly_cost = calculate_cost(params.instance_type, ids[0], avail_zone)\n # Report values\n output = ['UUID: {}'.format(uuid),\n 'Number of Samples: {}'.format(num_samples),\n 'Number of Nodes: {}'.format(params.cluster_size),\n 'Cluster Name: {}'.format(params.cluster_name),\n 'Source Bucket: {}'.format(params.bucket),\n 'Average Hourly Cost: ${}'.format(avg_hourly_cost),\n 'Cost per Instance: ${}'.format(total_cost),\n 'Availability Zone: {}'.format(avail_zone),\n 'Start Time: {}'.format(datetime.isoformat(datetime.utcfromtimestamp(start))),\n 'Stop Time: {}'.format(datetime.isoformat(datetime.utcfromtimestamp(stop))),\n 'Total Cost of Cluster: ${}'.format(float(total_cost) * int(params.cluster_size)),\n 'Cost Per Sample: ${}'.format((float(total_cost) * int(params.cluster_size) / int(num_samples)))]\n with open(os.path.join(str(uuid) + '_{}'.format(str(datetime.utcnow()).split()[0]), 'run_report.txt'), 'w') as f:\n f.write('\\n'.join(output))\n # You're done!\n logging.info('\\n\\nScaling Test Complete.')", "def __kmeans__(cls, cluster_size, pca_reduced, names: list):\n import warnings\n warnings.filterwarnings(\"ignore\")\n clusterer = KMeans(n_clusters=cluster_size, random_state=10)\n cluster_labels = clusterer.fit_predict(pca_reduced)\n result = list()\n result.append(ClusterProcessor.davies_bouldin(cluster_labels, pca_reduced, cluster_size, names))\n result.append(ClusterProcessor.variance_ratio_criterion(cluster_labels, pca_reduced, cluster_size, names))\n result.append(ClusterProcessor.silhouette_coefficient(cluster_labels, pca_reduced, cluster_size, names))\n return result", "def test_list_cluster_policy(self):\n pass", "def show_cluster(self):\n if self.controller.cluster:\n self.print_object(\n 'cluster', ('id', 'name', 'status'), self.controller.cluster\n )\n else:\n print(\"There is no cluster.\")", "def clusters(self):\n raise NotImplementedError", "def create_instance(ami, sg_name):\n instance = None\n ec2 = 
boto3.resource('ec2',region_name=\"us-east-1\")\n # TODO: Create an EC2 instance\n # Wait for the instance to enter the running state\n # Reload the instance attributes\n\n try:\n instance = ec2.create_instances(\n ImageId=ami,\n InstanceType=INSTANCE_TYPE,\n KeyName=KEY_NAME,\n MaxCount=1,\n MinCount=1,\n SecurityGroupIds=[\n sg_name,\n ],\n TagSpecifications=[{\n 'ResourceType': 'instance',\n 'Tags': TAGS\n }, {\n 'ResourceType': 'volume',\n 'Tags': TAGS\n }]\n )[0]\n instance.wait_until_running()\n instance.reload()\n print(instance.state)\n except ClientError as e:\n print(e)\n\n return instance", "def test_list_cluster_resource_quota(self):\n pass", "def clustering(cluster_list):\n while len(cluster_list) > 1:\n x = 0\n y = 0\n distance_min = 10\n\n for i in range(0,len(cluster_list)):\n\n for j in range(0,len(cluster_list)):\n\n if i != j:\n distance = cluster_list[i].linkage(cluster_list[j])\n if distance < distance_min:\n x = i\n y = j\n distance_min = distance\n \n \n clusX = cluster_list[x]\n clusY = cluster_list[y]\n cluster_list.pop(cluster_list.index(clusX))\n cluster_list.pop(cluster_list.index(clusY))\n\n cluster_list.append(Cluster(clusX,clusY))\n return cluster_list[0]", "def update_existed_cluster(existed_cluster,sema_cluster,qid,returned_result):\n if qid not in existed_cluster:\n existed_cluster[qid] = set()\n\n for tid in returned_result:\n cluster_id = sema_cluster.get_cluster_id(qid,tid)\n if cluster_id is not None:\n if cluster_id not in existed_cluster[qid]:\n existed_cluster[qid].add(cluster_id)", "def run_split_cluster(cluster_df, keep_df=None, remove_df=None):\n if keep_df is None:\n keep_df = pd.DataFrame(columns=cluster_df.columns)\n if remove_df is None:\n remove_df = pd.DataFrame(columns=cluster_df.columns)\n for label, group_df in cluster_df.groupby([\"label\"]):\n # only keep the maximum cluster\n center_count = {center: len(group_df[group_df.center == center]) for center in group_df.center.unique()}\n max_center = sorted(center_count.items(), key=lambda i: i[1], reverse=True)[0][0]\n keep_df = keep_df.append(group_df[group_df.center == max_center], ignore_index=True)\n remove_df = remove_df.append(group_df[group_df.center != max_center], ignore_index=True)\n return keep_df, remove_df", "def deregister_ecs_cluster(EcsClusterArn=None):\n pass", "def test_patch_cluster_resource_quota(self):\n pass", "def cluster_spec(num_workers, num_ps):\n cluster = {}\n port = 12222\n\n all_ps = []\n host = '127.0.0.1'\n for _ in range(num_ps):\n all_ps.append(\"{}:{}\".format(host, port))\n port += 1\n cluster['ps'] = all_ps\n\n all_workers = []\n for _ in range(num_workers):\n all_workers.append(\"{}:{}\".format(host, port))\n port += 1\n cluster['worker'] = all_workers\n return cluster", "def test_read_cluster_resource_quota(self):\n pass", "def test_delete_cluster_resource_quota(self):\n pass", "def test_replace_cluster_resource_quota_status(self):\n pass", "def cluster_myid(self, target_node: \"TargetNodesT\") -> ResponseT:\n return self.execute_command(\"CLUSTER MYID\", target_nodes=target_node)", "def test_patch_cluster_policy(self):\n pass", "def cluster_amazon_video_game_again() -> Dict:\n return dict(model=None, score=None, clusters=None)", "def cluster(self):\n center_index = np.random.choice(range(100), self.K, replace=False)\n self.centers = np.array([self.X[i] for i in center_index])\n self.cluster_sizes = np.zeros(self.K)\n member_of = np.zeros(100, dtype=int)\n min_dist = np.array([distance.euclidean(self.centers[0], point) for point in self.X])\n 
self.cluster_sizes[0] = 100\n flag = True\n while flag:\n flag = False\n for i, point in enumerate(self.X):\n for j, center in enumerate(self.centers):\n if member_of[i] != j:\n dist = distance.euclidean(point, center)\n if dist < min_dist[i]:\n flag = True\n current = member_of[i]\n self.cluster_sizes[current] -= 1\n self.cluster_sizes[j] += 1\n member_of[i] = j\n min_dist[i] = dist\n if np.count_nonzero(self.cluster_sizes) != self.K:\n return self.cluster()\n self.centers = np.zeros((self.K, 2), dtype='d')\n for i, point in enumerate(self.X):\n center = member_of[i]\n self.centers[center] += point\n for i, center in enumerate(self.centers):\n center /= self.cluster_sizes[i]", "def test_create_cluster_policy(self):\n pass", "def cluster_hdbscan(\n clusterable_embedding, min_cluster_size, viz_embedding_list\n):\n print(f\"min_cluster size: {min_cluster_size}\")\n clusterer = hdbscan.HDBSCAN(\n min_cluster_size=min_cluster_size, prediction_data=True\n ).fit(clusterable_embedding)\n labels = hdbscan.HDBSCAN(min_cluster_size=min_cluster_size,).fit_predict(\n clusterable_embedding\n )\n print(f\"found {len(np.unique(labels))} clusters\")\n clustered = labels >= 0\n print(f\"fraction clustered: {np.sum(clustered)/labels.shape[0]}\")\n for embedding in viz_embedding_list:\n plt.scatter(\n embedding[~clustered][:, 0],\n embedding[~clustered][:, 1],\n c=(0.5, 0.5, 0.5),\n s=10,\n alpha=0.5,\n )\n plt.scatter(\n embedding[clustered][:, 0],\n embedding[clustered][:, 1],\n c=labels[clustered],\n s=10,\n cmap=\"Spectral\",\n )\n plt.legend(labels)\n plt.show()\n\n return labels, clusterer", "def createInstance(ec2,ami,nb_nodes,placement,instance_type,key,sg,user_data=None):\n\n reservation = ec2.run_instances(ami,min_count=nb_nodes,max_count=nb_nodes,placement = placement,key_name=key,security_groups=[sg],instance_type=instance_type,user_data=user_data)\n instance = reservation.instances[0]\n return instance", "def test_read_cluster_policy(self):\n pass", "def run(ceph_cluster, **kw):\n log.info(run.__doc__)\n config = kw[\"config\"]\n cephadm = CephAdmin(cluster=ceph_cluster, **config)\n rados_object = RadosOrchestrator(node=cephadm)\n mon_obj = MonConfigMethods(rados_obj=rados_object)\n ceph_nodes = kw.get(\"ceph_nodes\")\n osd_list = []\n total_osd_app_mem = {}\n\n for node in ceph_nodes:\n if node.role == \"osd\":\n node_osds = rados_object.collect_osd_daemon_ids(node)\n osd_list = osd_list + node_osds\n\n target_configs = config[\"cache_trim_max_skip_pinned\"][\"configurations\"]\n max_skip_pinned_value = int(\n mon_obj.get_config(section=\"osd\", param=\"bluestore_cache_trim_max_skip_pinned\")\n )\n\n # Check the default value of the bluestore_cache_trim_max_skip_pinned value\n if max_skip_pinned_value != 1000:\n log.error(\n \"The default value of bluestore_cache_trim_max_skip_pinned not equal to 1000\"\n )\n raise Exception(\n \"The default value of bluestore_cache_trim_max_skip_pinned is not 1000\"\n )\n\n # Creating pools and starting the test\n for entry in target_configs.values():\n log.debug(\n f\"Creating {entry['pool_type']} pool on the cluster with name {entry['pool_name']}\"\n )\n if entry.get(\"pool_type\", \"replicated\") == \"erasure\":\n method_should_succeed(\n rados_object.create_erasure_pool, name=entry[\"pool_name\"], **entry\n )\n else:\n method_should_succeed(\n rados_object.create_pool,\n **entry,\n )\n\n if not rados_object.bench_write(**entry):\n log.error(\"Failed to write objects into the EC Pool\")\n return 1\n rados_object.bench_read(**entry)\n log.info(\"Finished 
writing data into the pool\")\n\n # performing scrub and deep-scrub\n rados_object.run_scrub()\n rados_object.run_deep_scrub()\n time.sleep(10)\n\n rados_object.change_heap_profiler_state(osd_list, \"start\")\n # Executing tests for 45 minutes\n time_execution = datetime.datetime.now() + datetime.timedelta(minutes=45)\n while datetime.datetime.now() < time_execution:\n # Get all OSDs heap dump\n heap_dump = rados_object.get_heap_dump(osd_list)\n # get the osd application used memory\n osd_app_mem = get_bytes_used_by_app(heap_dump)\n total_osd_app_mem = mergeDictionary(total_osd_app_mem, osd_app_mem)\n # wait for 10 seconds and collecting the memory\n time.sleep(10)\n for osd_id, mem_list in total_osd_app_mem.items():\n mem_growth = is_what_percent_mem(mem_list)\n if mem_growth > 80:\n log.error(\n f\"The osd.{osd_id} consuming more memory with the relative memory growth {mem_growth}\"\n )\n raise Exception(\"No warning generated by PG Autoscaler\")\n log.info(f\"The relative memory growth for the osd.{osd_id} is {mem_growth} \")\n\n rados_object.change_heap_profiler_state(osd_list, \"stop\")\n\n # check fo the crashes in the cluster\n crash_list = rados_object.do_crash_ls()\n if not crash_list:\n return 0\n else:\n return 1", "def show_cluster_status(self, *args, **kwargs):\r\n return execute(self._show_cluster_status, *args, **kwargs)", "def cluster_amazon_video_game() -> Dict:\n return dict(model=None, score=None, clusters=None)", "def clustering(dataset, logger):\n all_instances = dataset\n meta_dataset = collections.defaultdict(list)\n for instance in all_instances:\n meta_dataset[instance['label']].append(instance['coordinate'])\n\n tasklist = map(\n lambda item, meta_dataset=meta_dataset, logger=logger: (\n item[0],\n clustering_by_label,\n (item[1], item[0], meta_dataset, logger)), meta_dataset.items())\n\n # pool = multiprocessing.pool.Pool(PROCESS_COUNT)\n # clusters = dict(pool.map(map_generate_tuple, tasklist))\n clusters = dict(map(map_generate_tuple, tasklist))\n # pool.close()\n # pool.join()\n\n return clusters", "def cluster(self):\n return self._cluster", "def cluster(self):\n return self._cluster", "def find_clusters():\n clusters = ecs_client.list_clusters()['clusterArns']\n logging.debug(\"\")\n logging.debug(\"************************************************************\")\n logging.debug(\"Retrieved %i clusters\" % (len(clusters)))\n for cluster in clusters:\n ratio = SequenceMatcher(\n lambda item:\n item == \" \",\n \"arn:aws:ecs:us-east-1*cluster/default\",\n cluster\n ).ratio()\n if ratio < 0.82:\n cluster_short = cluster.split(\"/\")[1]\n if args.cluster and cluster_short != args.cluster:\n continue\n ecs_data[cluster_short] = {}\n logging.debug(\"Cluster: %s\" % (cluster))\n instance_arns = ecs_client.list_container_instances(\n cluster=cluster\n )['containerInstanceArns']\n instances = ecs_client.describe_container_instances(\n cluster=cluster,\n containerInstances=instance_arns\n )['containerInstances']\n logging.debug(\"Retrieved %i cluster instances\" % (len(instances)))\n for instance in instances:\n ecs_data[cluster_short][instance['ec2InstanceId']] = {\n 'instance_id': instance['ec2InstanceId'],\n 'cluster': cluster_short,\n 'containers': []\n }\n logging.debug(\"\\tLooking for tasks in (%s): %s %s\" % (instance_data[instance['ec2InstanceId']]['name'], instance_data[instance['ec2InstanceId']]['id'], instance['containerInstanceArn']))\n tasks = ecs_client.list_tasks(\n cluster=cluster,\n containerInstance=instance['containerInstanceArn'],\n )['taskArns']\n 
logging.debug(\"Retrieved %i cluster tasks\" % (len(tasks)))\n for task in tasks:\n containers = ecs_client.describe_tasks(\n cluster=cluster,\n tasks=[task]\n )['tasks']\n for container in containers:\n if args.action != \"list\":\n if container['taskDefinitionArn'].split(\"/\")[1].split(\":\")[0] == args.task:\n if args.action == \"ssh\":\n if args.random:\n hosts.append(instance['ec2InstanceId'])\n else:\n logging.debug(\"sshing to %s\" % (instance['ec2InstanceId']))\n print('*** Initiating Host Interactive Session\\n')\n interactive().connect(instance_data[instance['ec2InstanceId']]['private_ip'],'')\n sys.exit(0)\n if args.action == \"enter\":\n if args.random:\n logging.debug(\"Recording host %s for random selection\" % (instance['ec2InstanceId']))\n hosts.append(instance['ec2InstanceId'])\n else:\n logging.debug(\"connect to %s -> %s\" % (instance['ec2InstanceId'],container['taskDefinitionArn'].split(\"/\")[1].split(\":\")[0]))\n print '*** Initiating Container Interactive Session\\n'\n interactive().docker_enter(args.user, instance_data[instance['ec2InstanceId']]['private_ip'],args.task)\n sys.exit(0)\n if args.action == \"list\":\n logging.debug(\"%s matched arg(%s): %s\" % (container['taskDefinitionArn'].split(\"/\")[1].split(\":\")[0], args.action, instance['ec2InstanceId']))\n ecs_data[cluster_short][instance['ec2InstanceId']]['containers'].append(container['taskDefinitionArn'].split(\"/\")[1].split(\":\")[0])\n # logging.info(\"%s:%s\" % (container['taskDefinitionArn'].split(\"/\")[1].split(\":\")[0], args.task))\n return True", "def is_distributed(args: dict) -> bool:\n\n return args.local_rank != -1", "def count_all_cluster_instances(cluster_name, predictive=False, exclude_node_label_keys=app_config[\"EXCLUDE_NODE_LABEL_KEYS\"]):\n\n # Get the K8s nodes on the cluster, while excluding nodes with certain label keys\n k8s_nodes = get_k8s_nodes(exclude_node_label_keys)\n\n count = 0\n asgs = get_all_asgs(cluster_name)\n for asg in asgs:\n instances = asg['Instances']\n if predictive:\n count += asg['DesiredCapacity']\n else:\n # Use the get_node_by_instance_id() function as it only returns the node if it is not excluded by K8s labels\n for instance in instances:\n instance_id = instance['InstanceId']\n try:\n get_node_by_instance_id(k8s_nodes, instance_id)\n count += 1\n except Exception:\n logger.info(\"Skipping instance {}\".format(instance_id))\n logger.info(\"{} asg instance count in cluster is: {}. 
K8s node count should match this number\".format(\"*** Predicted\" if predictive else \"Current\", count))\n return count", "def is_distributed() -> NotImplementedError:\n raise NotImplementedError()", "def _GetShardedBatch() -> tf.types.experimental.distributed.PerReplica:\n per_host_batches: List[py_utils.NestedMap] = []\n # Note: `available_devices` omits the executor host; just those with TPUs.\n for host_device in py_utils.Flatten(\n cluster_factory.Current().available_devices.tolist()\n ):\n with tf.device(host_device):\n batch = self.task.input.GetPreprocessedInputBatch()\n\n # Remove bucket_keys; this relates to GenericInput pipelines.\n batch = batch.FilterKeyVal(lambda k, _: not k.endswith('bucket_keys'))\n\n # Process embedding ID features according to their specified types.\n batch = batch.TransformWithKey(\n tpu_embedding_layers_v2.TPU_EMBEDDING_MANAGER.ProcessInputFeature\n )\n\n per_host_batches.extend(Split(batch, replicas_per_host))\n\n return strategy.experimental_distribute_values_from_function(\n lambda ctx: per_host_batches[ctx.replica_id_in_sync_group]\n )", "def show_cluster(name: str) -> Cluster:\n environment = EnvironmentProvider().environment\n return environment.clusters[name]", "def _Delete(self):\n cmd = self.cmd_prefix + [\n 'redshift', 'delete-cluster-subnet-group',\n '--cluster-subnet-group-name', self.name\n ]\n vm_util.IssueCommand(cmd, raise_on_failure=False)", "def run_instance():\n ami_id = \"ami-04876f29fd3a5e8ba\" # AMI Id\n instance_type = \"t2.micro\" # Instance Type\n tag_specs = [\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': 'Name',\n 'Value': 'BoB10@ProductDev.sdk.8084'\n }\n ]\n }\n ]\n\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Run an instance\n\n instances = ec2_resource.create_instances(ImageId=ami_id, InstanceType=instance_type,\n MaxCount=1, MinCount=1, KeyName='BoB10@ProductDev.8084',\n TagSpecifications=tag_specs, \n SecurityGroupIds=['sg-06b757b4bb272d98f'],\n UserData=assemble_userdata().as_string())\n Instance_id = instances[0].id\n print('\\nInstance Id: ' + Instance_id)\n print('Image Id: ' + instances[0].image_id)\n print('Instance Type: ' + instances[0].instance_type)\n print('State: ' + instances[0].state['Name'])\n return Instance_id", "def instance_action():\n data = check_args(\n ('cloudProvider', 'apiKey', 'secretKey', 'instanceAction',\n 'instanceId')\n )\n job = jobs.instance_action.apply_async(args=(data,))\n return make_response(job_id=job.id)", "def cluster_enabled(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"cluster_enabled\")", "def test_update_hyperflex_cluster(self):\n pass", "def cluster_start(r):\n cluster_id = request_get(r, \"cluster_id\")\n if not cluster_id:\n logger.warning(\"No cluster_id is given\")\n return make_fail_response(\"No cluster_id is given\")\n if cluster_handler.start(cluster_id):\n return jsonify(response_ok), CODE_OK\n\n return make_fail_response(\"cluster start failed\")", "def configure_cluster(ctx, zone, db_instance_name):\n ctx.run(init_pg_servers_play_run(zone, db_instance_name), pty=True, echo=True)", "def find_cluster(self, id):\n raise NotImplementedError", "def simulated_cluster(n_stars=CLUSTER_DEFAULTS['stars'],\n dimensions=CLUSTER_DEFAULTS['dimensions']):\n\n nx, ny = dimensions\n\n # Create empty image\n image = np.zeros((ny, nx))\n\n # Generate random positions\n r = np.random.random(n_stars) * nx\n theta = np.random.uniform(0., 2. 
* np.pi, n_stars)\n\n # Generate random fluxes\n fluxes = np.random.random(n_stars) ** 2\n\n # Compute position\n x = nx / 2 + r * np.cos(theta)\n y = ny / 2 + r * np.sin(theta)\n\n # Add stars to image\n # ==> First for loop and if statement <==\n for idx in range(n_stars):\n if x[idx] >= 0 and x[idx] < nx and y[idx] >= 0 and y[idx] < ny:\n image[y[idx], x[idx]] += fluxes[idx]\n\n # Convolve with a gaussian\n image = gaussian_filter(image, sigma=1)\n\n # Add noise\n image += np.random.normal(1., 0.001, image.shape)\n\n return image", "def quick_instance(self, name, image, instance_type, env_tag='dev', zone_tag='starwatts', os_tag='debian', sg_id=None,\n private=True, extra_sg_ids=None, extra_tags=None, terminate_on_shutdown=False,\n debug=False):\n # Debug setting\n if debug:\n logging.basicConfig(level=logging.DEBUG)\n\n # Preliminary tests\n try:\n ami = self.get_image(image_id=image)\n except EC2ResponseError:\n logging.error(\"The image {} could not be found. Aborting.\".format(image))\n return\n print(\"Using AMI {} : {}\".format(image, ami.name))\n if len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'running'})) > 0 or \\\n len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'stopped'})) > 0:\n logging.error(\"An instance with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No instance has the same 'name' tag.\")\n if self.keypair_exists(name):\n logging.error(\"A keypair with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No keypair was found with the same name.\")\n if sg_id is None:\n if self.security_group_exists(name=name):\n logging.error(\"A security group with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No security group was found with the same name.\")\n\n # Tags generation\n logging.debug(\"Generating tags to apply.\")\n tags = dict(name=name, os=os_tag, env=env_tag, zone=zone_tag, privacy='true' if private else 'false')\n if extra_tags is not None:\n tags.update(extra_tags)\n print(\"Tags : {}\".format(tags))\n\n # Fetching needed security groups (bastion and zabbix)\n standard_sg = self.get_all_security_groups(groupnames=['standard'])\n if len(standard_sg) != 1:\n logging.error(\"Multiple or no security group was found for the 'bastion' search. 
Aborting.\")\n return\n standard_sg = standard_sg[0]\n logging.debug(\"The following security group was found for 'standard : {} {}\".format(standard_sg.id,\n standard_sg.description))\n\n # Security group creation\n if sg_id is None:\n sg = self.create_security_group(name, \"SG applied to {} VM\".format(name))\n sg_id = sg.id\n\n sg_ids = [sg_id, standard_sg.id, ]\n # Using the extra security groups if any\n if extra_sg_ids is not None:\n logging.debug(\"Extra security groups to add : {}\".format(extra_sg_ids))\n sg_ids.extend(extra_sg_ids)\n logging.debug(\"Security Groups : {}\".format(sg_ids))\n\n user_data = \"-----BEGIN OUTSCALE SECTION-----\\nprivate_only=true\\n-----END OUTSCALE SECTION-----\" if private else \"\"\n logging.debug(\"Creating keypair.\")\n kp = self.create_key_pair(key_name=name)\n fp = os.path.join(os.path.expanduser('~/.ssh'), '%s.pem' % kp.name)\n with open(fp, 'wb') as fd:\n fd.write(bytes(kp.material, \"UTF-8\"))\n logging.debug(\"Keypair written to ~/.ssh/{}.pem\".format(name))\n\n resa = self.run_instances(image_id=image, key_name=name, security_groups=sg_ids, instance_type=instance_type,\n user_data=user_data,\n instance_initiated_shutdown_behavior='terminate' if terminate_on_shutdown else 'stop')\n inst = resa.instances[0]\n logging.debug(\"Adding tags to the newly created machine.\")\n inst.add_tags(tags)\n return inst", "def on_public_cluster(self) -> bool:\n return not self.on_private_cluster" ]
[ "0.6270075", "0.5638455", "0.5503449", "0.54739726", "0.54159206", "0.5403497", "0.53866297", "0.5371962", "0.5363898", "0.5344102", "0.5332759", "0.5286043", "0.5280283", "0.5271148", "0.526295", "0.52239394", "0.5212385", "0.52122664", "0.52090883", "0.5194919", "0.517035", "0.5167043", "0.5163365", "0.51538503", "0.51475024", "0.51433134", "0.51416206", "0.51392126", "0.5116904", "0.5108965", "0.5108688", "0.5105649", "0.50996655", "0.5090773", "0.50906783", "0.50864303", "0.50752056", "0.50661343", "0.5064143", "0.50631726", "0.5060155", "0.5028259", "0.5026897", "0.50056374", "0.5001095", "0.4989145", "0.4985961", "0.49846128", "0.49781102", "0.49635145", "0.49555397", "0.49527782", "0.49362203", "0.49348548", "0.49280396", "0.4921485", "0.49171188", "0.4910736", "0.48951256", "0.4887648", "0.48862386", "0.48843732", "0.48789194", "0.48777944", "0.48775527", "0.48702514", "0.48689094", "0.48656356", "0.48562747", "0.48542908", "0.48517475", "0.4845224", "0.48442414", "0.48418507", "0.4831256", "0.48294187", "0.4820886", "0.48198265", "0.48197386", "0.4818905", "0.48181748", "0.48173225", "0.48034713", "0.48034713", "0.48007712", "0.47993648", "0.4795661", "0.47934127", "0.47904414", "0.47897455", "0.4789028", "0.4784288", "0.47816095", "0.47795117", "0.4772782", "0.47672155", "0.47608688", "0.47594222", "0.47537187", "0.47508428", "0.4747119" ]
0.0
-1
This operation supports sharded cluster instances only.
def describe_sharding_network_address( self, request: dds_20151201_models.DescribeShardingNetworkAddressRequest, ) -> dds_20151201_models.DescribeShardingNetworkAddressResponse: runtime = util_models.RuntimeOptions() return self.describe_sharding_network_address_with_options(request, runtime)
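A minimal usage sketch for the document above — the package layout (alibabacloud_dds20151201, alibabacloud_tea_openapi), the service endpoint, and the credential/instance-ID placeholders are assumptions for illustration, not part of this dataset row; per the query, only a sharded cluster instance ID is valid for this call.

from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_dds20151201.client import Client
from alibabacloud_dds20151201 import models as dds_20151201_models

# Build the client; endpoint and credentials below are placeholders (assumed layout).
config = open_api_models.Config(
    access_key_id='<ACCESS_KEY_ID>',
    access_key_secret='<ACCESS_KEY_SECRET>',
    endpoint='mongodb.aliyuncs.com',  # assumed endpoint for the DDS (MongoDB) service
)
client = Client(config)

# The request must reference a sharded cluster instance (not a replica set or standalone).
request = dds_20151201_models.DescribeShardingNetworkAddressRequest(
    dbinstance_id='<SHARDED_CLUSTER_INSTANCE_ID>',
)
response = client.describe_sharding_network_address(request)
print(response.body)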
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cluster(self):\n assert False", "def cluster_myshardid(self, target_nodes=None):\n return self.execute_command(\"CLUSTER MYSHARDID\", target_nodes=target_nodes)", "def delete_cluster(self):", "def run_cluster(autoscaling: bool = False, **options) -> None:\n if autoscaling:\n thread = AutoScalingCluster.new(**options)\n else:\n thread = RemoteCluster.new(**options)\n try:\n thread.join()\n except Exception:\n thread.stop()\n raise", "def create_redshift_cluster(config, redshift_role):\n redshift = create_boto3_client(config, 'redshift')\n cluster_identifier = config.get('CLUSTER', 'CLUSTER_IDENTIFIER')\n print(\"Creating redshift cluster: %s\" % cluster_identifier)\n try:\n cc_response = redshift.create_cluster(\n MasterUsername=config.get('CLUSTER', 'DB_USER'),\n MasterUserPassword=config.get('CLUSTER', 'DB_PASSWORD'),\n ClusterIdentifier=cluster_identifier,\n NodeType=config.get('CLUSTER', 'NODE_TYPE'),\n NumberOfNodes=int(config.get('CLUSTER', 'NODE_COUNT')),\n Port=int(config.get('CLUSTER', 'DB_PORT')),\n IamRoles=[\n redshift_role['Role']['Arn']\n ],\n ClusterSubnetGroupName=config.get('CLUSTER', 'SUBNET_GROUP'),\n ClusterSecurityGroups=[config.get('CLUSTER', 'SECURITY_GROUP_ID')]\n )\n print('Creating Cluster:', cc_response)\n except ClientError as e:\n if e.response['Error']['Code'] == 'ClusterAlreadyExists':\n print(\"Cluster %s already exists\" % cluster_identifier)\n return\n else:\n print(\"Unexpected error wile creating cluster: %s\" % e)\n\n print('Sleep 5 seconds')\n time.sleep(5)\n while True:\n print('Fetching status of cluster..')\n try:\n cluster_status = get_cluster_status(redshift, cluster_identifier)\n if cluster_status['Clusters'][0]['ClusterStatus'] == 'available':\n break\n print('Cluster Status:', cluster_status)\n except ClientError as e:\n print(\"Unexpected error wile getting cluster status: %s\" % e)\n raise e\n print('Sleep 10 seconds')\n time.sleep(10)\n print('Cluster is created and available.')", "def dvs_instances_batch_mix_sg(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n self.show_step(2)\n net_1 = os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network is created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(3)\n self.show_step(4)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n self.icmp[\"security_group_rule\"][\"security_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"remote_group_id\"] = sg1.id\n 
self.icmp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n self.icmp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n\n for sg in [sg1, sg2]:\n self.tcp[\"security_group_rule\"][\"security_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"remote_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n self.tcp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n\n # Add rules for ssh and ping\n os_conn.goodbye_security()\n _s_groups = os_conn.neutron.list_security_groups()['security_groups']\n _srv_tenant_id = os_conn.get_tenant(SERVTEST_TENANT).id\n default_sg = [sg for sg in _s_groups\n if sg['tenant_id'] == _srv_tenant_id and\n sg['name'] == 'default'][0]\n\n for step in (5, 9):\n self.show_step(step) # step 5, 9\n step += 1\n self.show_step(step) # step 6, 10\n step += 1\n\n openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=vm_count,\n security_groups=[sg1.name, sg2.name, default_sg['name']])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(step) # step 7, 11\n step += 1\n\n instances = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, instances)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair,\n timeout=60 * 5,\n interval=10)\n\n self.show_step(step) # step 8, 12\n step += 1\n\n for instance in instances:\n os_conn.nova.servers.delete(instance)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(instance)", "def run_clean_cluster(red_df, remove_df=None):\n cluster_df = cluster.get_cluster_output_df(red_df, False, quantile=0.8)\n return run_split_cluster(cluster_df, remove_df)", "def _create_cluster(self, server_instance):\n return Cluster([server_instance])", "def launch_cluster(params):\n logging.info('Launching cluster of size: {} and type: {}'.format(params.cluster_size, params.instance_type))\n subprocess.check_call(['cgcloud',\n 'create-cluster',\n '--leader-instance-type', 'm3.medium',\n '--instance-type', params.instance_type,\n '--share', params.shared_dir,\n '--num-workers', str(params.cluster_size),\n '-c', params.cluster_name,\n '--spot-bid', str(params.spot_price),\n '--leader-on-demand',\n '--ssh-opts',\n '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no',\n 'toil'])", "def test_patch_cluster_role(self):\n pass", "def recluster(cluster, min_size, guard, func):\r\n if cluster.get_length() == 0:\r\n return\r\n if cluster.get_length() <= min_size:\r\n return cluster\r\n sim = func(cluster.get_tweets())\r\n if sim < guard:\r\n kmeans = TweetKMeans(2)\r\n kmeans.set_data(cluster.get_tweets())\r\n return kmeans.start_algorithm()\r\n return cluster", "def launch_cluster(\n descs: List[MachineDesc],\n *,\n nonce: Optional[str] = None,\n key_name: Optional[str] = None,\n security_group_name: str = DEFAULT_SECURITY_GROUP_NAME,\n instance_profile: Optional[str] = DEFAULT_INSTANCE_PROFILE_NAME,\n extra_tags: Dict[str, str] = {},\n delete_after: datetime.datetime,\n git_rev: str = \"HEAD\",\n extra_env: Dict[str, str] = {},\n) -> List[Instance]:\n\n if not nonce:\n nonce = util.nonce(8)\n\n instances = [\n launch(\n key_name=key_name,\n instance_type=d.instance_type,\n ami=d.ami,\n 
ami_user=d.ami_user,\n tags={**d.tags, **extra_tags},\n display_name=f\"{nonce}-{d.name}\",\n size_gb=d.size_gb,\n security_group_name=security_group_name,\n instance_profile=instance_profile,\n nonce=nonce,\n delete_after=delete_after,\n )\n for d in descs\n ]\n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(\n asyncio.gather(\n *(\n setup(i, git_rev if d.checkout else \"HEAD\")\n for (i, d) in zip(instances, descs)\n )\n )\n )\n\n hosts_str = \"\".join(\n (f\"{i.private_ip_address}\\t{d.name}\\n\" for (i, d) in zip(instances, descs))\n )\n for i in instances:\n mssh(i, \"sudo tee -a /etc/hosts\", input=hosts_str.encode())\n\n env = \" \".join(f\"{k}={shlex.quote(v)}\" for k, v in extra_env.items())\n for (i, d) in zip(instances, descs):\n if d.launch_script:\n mssh(\n i,\n f\"(cd materialize && {env} nohup bash -c {shlex.quote(d.launch_script)}) &> mzscratch.log &\",\n )\n\n return instances", "def resize_cluster(ctx, project_name, cluster_name, instance_size_name):\n project = ctx.obj.groups.byName[project_name].get().data\n\n new_cluster_config = {\n 'clusterType': 'REPLICASET',\n 'providerSettings': {\n 'providerName': 'AWS',\n 'regionName': 'US_WEST_1',\n 'instanceSizeName': instance_size_name}}\n\n cluster = ctx.obj.groups[project.id].clusters[cluster_name].patch(\n **new_cluster_config)\n pprint(cluster.data)", "def test_replace_cluster_role(self):\n pass", "def running_cluster(request):\n cluster_name = 'running-cluster-' + random_string()\n launch_cluster(\n cluster_name=cluster_name,\n instance_type=request.param.instance_type,\n spark_version=request.param.spark_version,\n spark_git_commit=request.param.spark_git_commit)\n\n if request.param.restarted:\n stop_cluster(cluster_name)\n start_cluster(cluster_name)\n\n def destroy():\n p = subprocess.run([\n 'flintrock', 'destroy', cluster_name, '--assume-yes'])\n assert p.returncode == 0\n request.addfinalizer(destroy)\n\n return cluster_name", "def create_cluster(ctx, project_name, cluster_name, instance_size_name):\n project = ctx.obj.groups.byName[project_name].get().data\n\n cluster_config = {\n 'name': cluster_name,\n 'clusterType': 'REPLICASET',\n 'providerSettings': {\n 'providerName': 'AWS',\n 'regionName': 'US_WEST_1',\n 'instanceSizeName': instance_size_name}}\n\n cluster = ctx.obj.groups[project.id].clusters.post(**cluster_config)\n pprint(cluster.data)", "def test_create_cluster_role(self):\n pass", "def _get_cluster_list(self):\n return self.__cluster_list", "def cluster_shards(self, target_nodes=None):\n return self.execute_command(\"CLUSTER SHARDS\", target_nodes=target_nodes)", "def dvs_instances_one_group(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(2)\n self.show_step(3)\n openstack.create_instances(os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n vm_count=vm_count,\n security_groups=[security_group.name])\n 
openstack.verify_instance_state(os_conn)\n\n self.show_step(4)\n srv_list = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, srv_list)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair)\n\n self.show_step(5)\n for srv in srv_list:\n os_conn.nova.servers.delete(srv)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(srv)", "def atlas_clusters():\n pass", "def resource_type(self):\n return 'cluster'", "def create_cache_cluster(stack, name, cache_type, vpc, cidrs, subnet_ids,\n instance_type, num_cache_clusters):\n ports = {'redis': 6379, 'memcached': 11211}\n ingress = []\n\n for idx, cidr in enumerate(cidrs):\n ingress.append(\n SecurityGroupRule(\n '{0}{1}{2}'.format(name.replace('-', ''), cache_type, idx),\n CidrIp=cidr,\n FromPort=ports[cache_type],\n ToPort=ports[cache_type],\n IpProtocol='tcp',\n ))\n\n secgroup = stack.stack.add_resource(\n SecurityGroup(\n '{0}{1}SecurityGroup'.format(name.replace('-', ''), cache_type),\n GroupDescription='{0} {1} Security Group'.format(name, cache_type),\n SecurityGroupIngress=ingress,\n SecurityGroupEgress=[\n SecurityGroupRule(\n '{0}egress'.format(name.replace('-', '')),\n CidrIp='0.0.0.0/0',\n IpProtocol='-1')\n ],\n VpcId=vpc,\n ))\n\n subnet_group = stack.stack.add_resource(\n elasticache.SubnetGroup(\n '{0}{1}cache'.format(name.replace('-', ''), cache_type),\n Description='{0}{1} cache'.format(name, cache_type),\n SubnetIds=subnet_ids,\n ))\n\n if num_cache_clusters > 1:\n stack.stack.add_resource(\n elasticache.ReplicationGroup(\n '{0}CacheCluster'.format(name.replace('-', '')),\n ReplicationGroupId='{0}'.format(name),\n ReplicationGroupDescription='{0}cluster'.format(name),\n Engine='{0}'.format(cache_type),\n EngineVersion='3.2.6',\n CacheNodeType=instance_type,\n NumCacheClusters=num_cache_clusters,\n CacheSubnetGroupName=Ref(subnet_group),\n SecurityGroupIds=[Ref(secgroup)],\n AtRestEncryptionEnabled=True))\n else:\n stack.stack.add_resource(\n elasticache.CacheCluster(\n '{0}CacheCluster'.format(name.replace('-', '')),\n ClusterName='{0}'.format(name),\n Engine='{0}'.format(cache_type),\n EngineVersion='3.2.10',\n CacheNodeType=instance_type,\n NumCacheNodes=num_cache_clusters,\n VpcSecurityGroupIds=[Ref(secgroup)],\n CacheSubnetGroupName=Ref(subnet_group)))", "def cluster_by_partitioning(active_sites):\n cls, sc = k_means(active_sites)\n\n return cls", "def test_replace_cluster_resource_quota(self):\n pass", "def launch_cluster(**overrides) -> dict:\n if os.path.isfile(META_FILE):\n raise FileExistsError(\"Cluster already exists!\")\n\n config = DEFAULT_CONFIG.copy()\n config.update(**overrides)\n\n sg = make_sg()\n config[\"Instances\"].update(AdditionalMasterSecurityGroups=[sg.id])\n emr = get_emr_client()\n\n response = emr.run_job_flow(**config)\n cluster_id = response[\"JobFlowId\"]\n master_addr = wait_init(cluster_id)\n\n meta = {\n \"MasterNodeAddr\": master_addr,\n \"ClusterId\": cluster_id,\n \"SGId\": sg.id\n }\n with open(META_FILE, \"w\") as f:\n json.dump(meta, f)\n\n print(\"INFO: Cluster Launched!\")\n return meta", "def startCluster():\n # attempt to create a cluster\n print(\"Creating a Redshift cluster...\")\n try:\n redshift.create_cluster(\n\n # hardware parameters\n ClusterType=DWH_CLUSTER_TYPE,\n NodeType=DWH_NODE_TYPE,\n NumberOfNodes=int(DWH_NUM_NODES),\n\n # database access configuration\n DBName=DWH_DB,\n 
ClusterIdentifier=DWH_CLUSTER_IDENTIFIER,\n MasterUsername=DWH_DB_USER,\n MasterUserPassword=DWH_DB_PASSWORD,\n\n # accesses\n IamRoles=[iam.get_role(RoleName=DWH_IAM_ROLE_NAME)[\"Role\"][\"Arn\"]]\n )\n except Exception as e:\n print(e)\n return\n\n # wait for cluster to spin up\n print(\"Waiting for cluster to be available...\")\n while redshift.describe_clusters(\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER\n )[\"Clusters\"][0][\"ClusterStatus\"] != \"available\":\n time.sleep(30)\n print(\"\\tChecking status again...\")", "def remove_cluster(config, nova, neutron, cinder, conn):\n\n cluster_info = OSClusterInfo(nova, neutron, cinder, config, conn)\n masters = cluster_info.get_instances(\"node\")\n workers = cluster_info.get_instances(\"master\")\n\n tasks = [host.delete(neutron) for host in masters if host]\n tasks += [host.delete(neutron) for host in workers if host]\n if tasks:\n LOGGER.debug(\"Deleting Instances ...\")\n loop = asyncio.get_event_loop()\n loop.run_until_complete(asyncio.wait(tasks))\n loop.close()\n\n LoadBalancer(config, conn).delete()\n\n sg_name = '%s-sec-group' % config['cluster-name']\n secg = conn.list_security_groups({\"name\": sg_name})\n if secg:\n LOGGER.debug(\"Deleting SecurityGroup %s ...\", sg_name)\n for sg in secg:\n for rule in sg.security_group_rules:\n conn.delete_security_group_rule(rule['id'])\n\n for port in conn.list_ports():\n if sg.id in port.security_groups:\n conn.delete_port(port.id)\n conn.delete_security_group(sg_name)\n\n # This needs to be replaced with OpenStackAPI in the future\n for vol in cinder.volumes.list():\n try:\n if config['cluster-name'] in vol.name and vol.status != 'in-use':\n try:\n vol.delete()\n except (BadRequest, NotFound):\n pass\n\n except TypeError:\n continue\n\n # delete the cluster key pair\n conn.delete_keypair(config['cluster-name'])", "def cluster_timeseries(X, roi_mask_nparray, n_clusters, similarity_metric, affinity_threshold, cluster_method = 'ward'):\n \n import sklearn as sk\n from sklearn import cluster, datasets, preprocessing\n import scipy as sp\n import time \n from sklearn.cluster import FeatureAgglomeration\n from sklearn.feature_extraction import image\n\n \n print('Beginning Calculating pairwise distances between voxels')\n \n X = np.array(X)\n X_dist = sp.spatial.distance.pdist(X.T, metric = similarity_metric)\n \n temp=X_dist\n temp[np.isnan(temp)]=0\n tempmax=temp.max()\n \n X_dist = sp.spatial.distance.squareform(X_dist)\n X_dist[np.isnan(X_dist)]=tempmax\n #import pdb;pdb.set_trace()\n sim_matrix=1-sk.preprocessing.normalize(X_dist, norm='max')\n sim_matrix[sim_matrix<affinity_threshold]=0\n #import pdb;pdb.set_trace()\n if cluster_method == 'ward':\n # ## BEGIN WARD CLUSTERING CODE \n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n if roi_mask_nparray!='empty':\n #import pdb; pdb.set_trace()\n shape = roi_mask_nparray.shape\n connectivity = image.grid_to_graph(n_x=shape[0], n_y=shape[1],\n n_z=shape[2], mask=roi_mask_nparray)\n \n ward = FeatureAgglomeration(n_clusters=n_clusters, connectivity=connectivity,\n linkage='ward')\n ward.fit(sim_matrix)\n y_pred = ward.labels_.astype(np.int)\n else:\n print(\"Calculating Hierarchical Clustering\")\n ward = FeatureAgglomeration(n_clusters=n_clusters, affinity='euclidean', linkage='ward') \n ward.fit(sim_matrix)\n y_pred = ward.labels_.astype(np.int)\n \n# # END WARD CLUSTERING CODE \n else:\n \n print(\"spectral\")\n print(\"spectral\")\n 
print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n #cluster_method== 'spectral':\n #Spectral method\n spectral = cluster.SpectralClustering(n_clusters, eigen_solver='arpack', random_state = 5, affinity=\"precomputed\", assign_labels='discretize') \n spectral.fit(sim_matrix)\n y_pred = spectral.labels_.astype(np.int) \n\n# \n # BEGIN SPECTRAL CLUSTERING CODE \n \n # END SPECTRAL CLUSTERING CODE \n\n\n\n return y_pred", "def create_cluster(df,validate, test, X, k, name):\n \n scaler = StandardScaler(copy=True).fit(df[X])\n X_scaled = pd.DataFrame(scaler.transform(df[X]), columns=df[X].columns.values).set_index([df[X].index.values])\n kmeans = KMeans(n_clusters = k, random_state = 42)\n kmeans.fit(X_scaled)\n kmeans.predict(X_scaled)\n df[name] = kmeans.predict(X_scaled)\n df[name] = 'cluster_' + df[name].astype(str)\n \n v_scaled = pd.DataFrame(scaler.transform(validate[X]), columns=validate[X].columns.values).set_index([validate[X].index.values])\n validate[name] = kmeans.predict(v_scaled)\n validate[name] = 'cluster_' + validate[name].astype(str)\n \n t_scaled = pd.DataFrame(scaler.transform(test[X]), columns=test[X].columns.values).set_index([test[X].index.values])\n test[name] = kmeans.predict(t_scaled)\n test[name] = 'cluster_' + test[name].astype(str)\n \n centroids = pd.DataFrame(scaler.inverse_transform(kmeans.cluster_centers_), columns=X_scaled.columns)\n return df, X_scaled, scaler, kmeans, centroids", "def test_patch_hyperflex_cluster(self):\n pass", "def test_read_cluster_role(self):\n pass", "def main():\n rs = redshift(config_file=CONFIG_FILENAME)\n \n # check if cluster already available\n try:\n clust_avail = check_available(rs)\n except rs_client.exceptions.ClusterNotFoundFault:\n clust_avail = False\n\n # if cluster not available, create it\n if not clust_avail:\n create_cluster(rs) \n \n print(f'Cluster is available. 
Cluster information: \\n{rs.get_cluster_info()}')", "def _setup_cluster(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "def _load_cluster(self):", "def cross_cluster_timeseries(data1, data2, roi_mask_nparray, n_clusters, similarity_metric, affinity_threshold, cluster_method = 'ward'):\n \n \n \n import scipy as sp\n import time\n import sklearn as sk\n from sklearn import cluster, datasets, preprocessing\n from sklearn.cluster import FeatureAgglomeration\n from sklearn.feature_extraction import image\n\n \n \n print(\"Calculating Cross-clustering\")\n print(\"Calculating pairwise distances between areas\")\n \n dist_btwn_data_1_2 = np.array(sp.spatial.distance.cdist(data1.T, data2.T, metric = similarity_metric))\n sim_btwn_data_1_2=1-dist_btwn_data_1_2\n sim_btwn_data_1_2[np.isnan(sim_btwn_data_1_2)]=0\n sim_btwn_data_1_2[sim_btwn_data_1_2<affinity_threshold]=0\n\n print(\"Calculating pairwise distances between voxels in ROI 1 \")\n dist_of_1 = sp.spatial.distance.pdist(sim_btwn_data_1_2, metric = 'euclidean')\n dist_matrix = sp.spatial.distance.squareform(dist_of_1)\n sim_matrix=1-sk.preprocessing.normalize(dist_matrix, norm='max')\n sim_matrix[sim_matrix<affinity_threshold]=0\n\n\n if cluster_method == 'ward':\n # ## BEGIN WARD CLUSTERING CODE \n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n if roi_mask_nparray!='empty':\n #import pdb; pdb.set_trace()\n shape = roi_mask_nparray.shape\n connectivity = image.grid_to_graph(n_x=shape[0], n_y=shape[1],\n n_z=shape[2], mask=roi_mask_nparray)\n \n ward = FeatureAgglomeration(n_clusters=n_clusters, connectivity=connectivity,\n linkage='ward')\n ward.fit(sim_matrix)\n y_pred = ward.labels_.astype(np.int)\n else:\n print(\"Calculating Hierarchical Cross-clustering\")\n ward = FeatureAgglomeration(n_clusters=n_clusters, affinity='euclidean', linkage='ward') \n ward.fit(sim_matrix)\n y_pred = ward.labels_.astype(np.int)\n \n # # END WARD CLUSTERING CODE \n else:\n \n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n #cluster_method== 'spectral':\n #Spectral method\n spectral = cluster.SpectralClustering(n_clusters, eigen_solver='arpack', random_state = 5, affinity=\"precomputed\", assign_labels='discretize') \n spectral.fit(sim_matrix)\n y_pred = spectral.labels_.astype(np.int) \n\n# \n # BEGIN SPECTRAL CLUSTERING CODE \n \n # END SPECTRAL CLUSTERING CODE \n\n\n\n# sim_matrix[np.isnan((sim_matrix))]=0\n# sim_matrix[sim_matrix<0]=0\n# sim_matrix[sim_matrix>1]=1\n\n ## BEGIN WARD CLUSTERING CODE \n# print(\"Calculating Hierarchical Cross-clustering\")\n# ward = FeatureAgglomeration(n_clusters=n_clusters, affinity='euclidean', linkage='ward') \n# ward.fit(sim_matrix)\n# y_pred = ward.labels_.astype(np.int)\n# \n ## END WARD CLUSTERING CODE \n \n# # BEGIN SPECTRAL CLUSTERING CODE \n# spectral = cluster.SpectralClustering(n_clusters, eigen_solver='arpack', random_state = 5, affinity=\"precomputed\", assign_labels='discretize') \n# spectral.fit(sim_matrix)\n# y_pred = spectral.labels_.astype(np.int)\n# # END SPECTRAL CLUSTERING CODE \n \n return y_pred", "def test_create_cluster_resource_quota(self):\n pass", "def index(self):\n\n if self.cluster:\n self.cluster.index()\n else:\n super().index()", "def test_list_cluster_role(self):\n pass", "def testSharded(self):\n np.random.seed(0)\n 
num_classes = 5\n batch_size = 3\n\n for num_true in range(1, 5):\n labels = np.random.randint(\n low=0, high=num_classes, size=batch_size * num_true)\n (weights, biases, hidden_acts, sampled_vals, exp_logits,\n exp_labels) = self._GenerateTestData(\n num_classes=num_classes,\n dim=10,\n batch_size=batch_size,\n num_true=num_true,\n labels=labels,\n sampled=[1, 0, 2, 3],\n subtract_log_q=False)\n weight_shards, bias_shards = self._ShardTestEmbeddings(\n weights, biases, num_shards=3)\n logits_tensor, labels_tensor = _compute_sampled_logits(\n weights=[constant_op.constant(shard) for shard in weight_shards],\n biases=[constant_op.constant(shard) for shard in bias_shards],\n labels=constant_op.constant(\n labels, dtype=dtypes.int64, shape=(batch_size, num_true)),\n inputs=constant_op.constant(hidden_acts),\n num_sampled=4,\n num_classes=num_classes,\n num_true=num_true,\n sampled_values=sampled_vals,\n subtract_log_q=False,\n remove_accidental_hits=False,\n partition_strategy=\"div\",\n name=\"sampled_logits_sharded_num_true_%d\" % num_true)\n got_logits, got_labels = self.evaluate([logits_tensor, labels_tensor])\n self.assertAllClose(exp_logits, got_logits, self._eps)\n self.assertAllClose(exp_labels, got_labels, self._eps)", "def create_cluster(rs):\n\n rs.create_cluster(verbose=False)\n print('Creating cluster. Will check every 30 seconds for completed creation.')\n cluster_built = False\n while not cluster_built:\n print('Sleeping 30 seconds.')\n time.sleep(30)\n cluster_built = check_available(rs)", "def _cluster(self):\n # , distance_function=spearman_squared_distance, max_iter=1000, tol=0.0001):\n if self.cluster_method is None:\n clusters = KMedoids(\n self.k,\n self.batchsize,\n dist_func=self.distance_function,\n max_iter=self.max_iter,\n tol=self.tol,\n init_medoids=self.init_medoids,\n swap_medoids=self.swap_medoids,\n )\n clusters.fit(self.clustering_attributions, verbose=self.verbose)\n\n self.subpopulations = clusters.members\n self.subpopulation_sizes = GAM.get_subpopulation_sizes(clusters.members)\n self.explanations = self._get_explanations(clusters.centers)\n # Making explanations return numerical values instead of dask arrays\n if isinstance(self.explanations[0][0][1], da.Array):\n explanations = []\n for explanation in self.explanations:\n explanations.append([(x[0], x[1].compute()) for x in explanation])\n self.explanations = explanations\n else:\n self.cluster_method(self)", "def test_crud_cluster(self):\n # create the object\n response = self._create_cluster()\n self.assertEqual(response.status_code, status.HTTP_201_CREATED,\n response.content)\n\n # list the object\n cluster_id = self._list_cluster()\n # Assert that the originally created cluster id is the same as the one\n # returned by list\n self.assertEquals(response.data['id'], cluster_id)\n self.assertEquals(response.data['default_vm_type'], 'm5.24xlarge')\n self.assertEquals(response.data['default_zone']['name'], 'us-east-1b')\n\n # check details\n cluster_id = self._check_cluster_exists(cluster_id)\n\n # update cluster\n response = self._update_cluster(cluster_id)\n self.assertEquals(response['name'], 'new_name')\n\n # delete the object\n response = self._delete_cluster(cluster_id)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data)\n\n # check it no longer exists\n self._check_no_clusters_exist()", "def terminateCluster():\n try:\n # delete cluster\n redshift.delete_cluster(\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER,\n SkipFinalClusterSnapshot=True\n )\n\n # clear up role\n 
iam.detach_role_policy(\n RoleName=DWH_IAM_ROLE_NAME,\n PolicyArn=\"arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess\"\n )\n iam.delete_role(RoleName=DWH_IAM_ROLE_NAME)\n except Exception as e:\n print(e)", "def enableRemoteAccess():\n myClusterProps = redshift.describe_clusters(\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER\n )['Clusters'][0]\n\n # attempt to allow incoming TCP connections to the cluster\n print(\"Enabling incoming TCP connections...\")\n try:\n vpc = ec2.Vpc(id=myClusterProps[\"VpcId\"])\n for sg in vpc.security_groups.all():\n if sg.group_name == \"default\":\n defaultSg = sg\n break\n\n defaultSg.authorize_ingress(\n GroupName=defaultSg.group_name,\n CidrIp=CIDR_IP_ALLOWED,\n IpProtocol='TCP',\n FromPort=int(DWH_PORT),\n ToPort=int(DWH_PORT)\n )\n except Exception as e:\n print(e)\n return\n\n print(\"\\nCluster is ready for access!\")\n endpoint = myClusterProps[\"Endpoint\"][\"Address\"]\n print(f\"Endpoint: {endpoint}\")", "def modifyMastersWithMultipleInstances(self):\n # Nothing to do\n pass", "def test_get_hyperflex_cluster_list(self):\n pass", "def create_redshift_cluster(redshift_client, role_arn):\n # Create the cluster if it doesn't exist.\n try:\n response = redshift_client.create_cluster(\n ClusterType=CLUSTER_TYPE,\n NodeType=NODE_TYPE,\n NumberOfNodes=NUM_NODES,\n DBName=DBNAME,\n ClusterIdentifier=IDENTIFIER,\n MasterUsername=USER,\n MasterUserPassword=PASSWORD,\n IamRoles=[role_arn]\n )\n except Exception as e:\n print(e)", "def cluster_destroy(extra_args=None):\n cmd = [\"pcs\", \"cluster\", \"destroy\"]\n\n if isinstance(extra_args, (list, tuple)):\n cmd += extra_args\n\n log.debug(\"Running cluster destroy: %s\", cmd)\n\n return __salt__[\"cmd.run_all\"](cmd, output_loglevel=\"trace\", python_shell=False)", "def test_replace_cluster_policy(self):\n pass", "def test_eks_worker_node_managed_by_eks(self) -> None:\n response = self.ec2.describe_instances(Filters=[\n {\n 'Name': 'tag:Name',\n 'Values': ['eks-prod']\n }\n ])\n worker_instances = response.get('Reservations')[0].get('Instances')\n self.assertEqual(1, len(worker_instances))", "def launch_example_cluster_cmd(*args, **kwargs):\n return launch_example_cluster(*args, **kwargs)", "def test_delete_cluster_role(self):\n pass", "def cluster_nodes(self) -> ResponseT:\n return self.execute_command(\"CLUSTER NODES\")", "def main():\n parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('--config', required=True, help='Configuration file for run. Must be in shared_dir')\n parser.add_argument('-c', '--cluster_size', required=True, help='Number of workers desired in the cluster.')\n parser.add_argument('-s', '--sample_size', required=True, type=float, help='Size of the sample deisred in TB.')\n parser.add_argument('-t', '--instance_type', default='c3.8xlarge', help='e.g. 
m4.large or c3.8xlarge.')\n parser.add_argument('-n', '--cluster_name', required=True, help='Name of cluster.')\n parser.add_argument('--namespace', default='jtvivian', help='CGCloud NameSpace')\n parser.add_argument('--spot_price', default=0.60, help='Change spot price of instances')\n parser.add_argument('-b', '--bucket', default='tcga-data-cgl-recompute', help='Bucket where data is.')\n parser.add_argument('-d', '--shared_dir', required=True,\n help='Full path to directory with: pipeline script, launch script, config, and master key.')\n params = parser.parse_args()\n\n # Run sequence\n start = time.time()\n # Get number of samples from config\n with open(params.config, 'r') as f:\n num_samples = len(f.readlines())\n # Launch cluster and pipeline\n uuid = fix_launch(params)\n launch_cluster(params)\n ids = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')\n launch_pipeline(params)\n # Blocks until all workers are idle\n stop = time.time()\n # Collect metrics from cluster\n collect_metrics(ids, list_of_metrics, start, stop, uuid=uuid)\n # Apply \"Insta-kill\" alarm to every worker\n map(apply_alarm_to_instance, ids)\n # Kill leader\n logging.info('Killing Leader')\n leader_id = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-leader')[0]\n apply_alarm_to_instance(leader_id, threshold=5)\n # Generate Run Report\n avail_zone = get_avail_zone(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')[0]\n total_cost, avg_hourly_cost = calculate_cost(params.instance_type, ids[0], avail_zone)\n # Report values\n output = ['UUID: {}'.format(uuid),\n 'Number of Samples: {}'.format(num_samples),\n 'Number of Nodes: {}'.format(params.cluster_size),\n 'Cluster Name: {}'.format(params.cluster_name),\n 'Source Bucket: {}'.format(params.bucket),\n 'Average Hourly Cost: ${}'.format(avg_hourly_cost),\n 'Cost per Instance: ${}'.format(total_cost),\n 'Availability Zone: {}'.format(avail_zone),\n 'Start Time: {}'.format(datetime.isoformat(datetime.utcfromtimestamp(start))),\n 'Stop Time: {}'.format(datetime.isoformat(datetime.utcfromtimestamp(stop))),\n 'Total Cost of Cluster: ${}'.format(float(total_cost) * int(params.cluster_size)),\n 'Cost Per Sample: ${}'.format((float(total_cost) * int(params.cluster_size) / int(num_samples)))]\n with open(os.path.join(str(uuid) + '_{}'.format(str(datetime.utcnow()).split()[0]), 'run_report.txt'), 'w') as f:\n f.write('\\n'.join(output))\n # You're done!\n logging.info('\\n\\nScaling Test Complete.')", "def __kmeans__(cls, cluster_size, pca_reduced, names: list):\n import warnings\n warnings.filterwarnings(\"ignore\")\n clusterer = KMeans(n_clusters=cluster_size, random_state=10)\n cluster_labels = clusterer.fit_predict(pca_reduced)\n result = list()\n result.append(ClusterProcessor.davies_bouldin(cluster_labels, pca_reduced, cluster_size, names))\n result.append(ClusterProcessor.variance_ratio_criterion(cluster_labels, pca_reduced, cluster_size, names))\n result.append(ClusterProcessor.silhouette_coefficient(cluster_labels, pca_reduced, cluster_size, names))\n return result", "def test_list_cluster_policy(self):\n pass", "def show_cluster(self):\n if self.controller.cluster:\n self.print_object(\n 'cluster', ('id', 'name', 'status'), self.controller.cluster\n )\n else:\n print(\"There is no cluster.\")", "def clusters(self):\n raise NotImplementedError", "def test_list_cluster_resource_quota(self):\n pass", "def create_instance(ami, sg_name):\n 
instance = None\n ec2 = boto3.resource('ec2',region_name=\"us-east-1\")\n # TODO: Create an EC2 instance\n # Wait for the instance to enter the running state\n # Reload the instance attributes\n\n try:\n instance = ec2.create_instances(\n ImageId=ami,\n InstanceType=INSTANCE_TYPE,\n KeyName=KEY_NAME,\n MaxCount=1,\n MinCount=1,\n SecurityGroupIds=[\n sg_name,\n ],\n TagSpecifications=[{\n 'ResourceType': 'instance',\n 'Tags': TAGS\n }, {\n 'ResourceType': 'volume',\n 'Tags': TAGS\n }]\n )[0]\n instance.wait_until_running()\n instance.reload()\n print(instance.state)\n except ClientError as e:\n print(e)\n\n return instance", "def clustering(cluster_list):\n while len(cluster_list) > 1:\n x = 0\n y = 0\n distance_min = 10\n\n for i in range(0,len(cluster_list)):\n\n for j in range(0,len(cluster_list)):\n\n if i != j:\n distance = cluster_list[i].linkage(cluster_list[j])\n if distance < distance_min:\n x = i\n y = j\n distance_min = distance\n \n \n clusX = cluster_list[x]\n clusY = cluster_list[y]\n cluster_list.pop(cluster_list.index(clusX))\n cluster_list.pop(cluster_list.index(clusY))\n\n cluster_list.append(Cluster(clusX,clusY))\n return cluster_list[0]", "def update_existed_cluster(existed_cluster,sema_cluster,qid,returned_result):\n if qid not in existed_cluster:\n existed_cluster[qid] = set()\n\n for tid in returned_result:\n cluster_id = sema_cluster.get_cluster_id(qid,tid)\n if cluster_id is not None:\n if cluster_id not in existed_cluster[qid]:\n existed_cluster[qid].add(cluster_id)", "def run_split_cluster(cluster_df, keep_df=None, remove_df=None):\n if keep_df is None:\n keep_df = pd.DataFrame(columns=cluster_df.columns)\n if remove_df is None:\n remove_df = pd.DataFrame(columns=cluster_df.columns)\n for label, group_df in cluster_df.groupby([\"label\"]):\n # only keep the maximum cluster\n center_count = {center: len(group_df[group_df.center == center]) for center in group_df.center.unique()}\n max_center = sorted(center_count.items(), key=lambda i: i[1], reverse=True)[0][0]\n keep_df = keep_df.append(group_df[group_df.center == max_center], ignore_index=True)\n remove_df = remove_df.append(group_df[group_df.center != max_center], ignore_index=True)\n return keep_df, remove_df", "def deregister_ecs_cluster(EcsClusterArn=None):\n pass", "def test_patch_cluster_resource_quota(self):\n pass", "def cluster_spec(num_workers, num_ps):\n cluster = {}\n port = 12222\n\n all_ps = []\n host = '127.0.0.1'\n for _ in range(num_ps):\n all_ps.append(\"{}:{}\".format(host, port))\n port += 1\n cluster['ps'] = all_ps\n\n all_workers = []\n for _ in range(num_workers):\n all_workers.append(\"{}:{}\".format(host, port))\n port += 1\n cluster['worker'] = all_workers\n return cluster", "def test_read_cluster_resource_quota(self):\n pass", "def test_delete_cluster_resource_quota(self):\n pass", "def test_replace_cluster_resource_quota_status(self):\n pass", "def cluster_myid(self, target_node: \"TargetNodesT\") -> ResponseT:\n return self.execute_command(\"CLUSTER MYID\", target_nodes=target_node)", "def test_patch_cluster_policy(self):\n pass", "def cluster_amazon_video_game_again() -> Dict:\n return dict(model=None, score=None, clusters=None)", "def cluster(self):\n center_index = np.random.choice(range(100), self.K, replace=False)\n self.centers = np.array([self.X[i] for i in center_index])\n self.cluster_sizes = np.zeros(self.K)\n member_of = np.zeros(100, dtype=int)\n min_dist = np.array([distance.euclidean(self.centers[0], point) for point in self.X])\n self.cluster_sizes[0] = 100\n flag = 
True\n while flag:\n flag = False\n for i, point in enumerate(self.X):\n for j, center in enumerate(self.centers):\n if member_of[i] != j:\n dist = distance.euclidean(point, center)\n if dist < min_dist[i]:\n flag = True\n current = member_of[i]\n self.cluster_sizes[current] -= 1\n self.cluster_sizes[j] += 1\n member_of[i] = j\n min_dist[i] = dist\n if np.count_nonzero(self.cluster_sizes) != self.K:\n return self.cluster()\n self.centers = np.zeros((self.K, 2), dtype='d')\n for i, point in enumerate(self.X):\n center = member_of[i]\n self.centers[center] += point\n for i, center in enumerate(self.centers):\n center /= self.cluster_sizes[i]", "def test_create_cluster_policy(self):\n pass", "def cluster_hdbscan(\n clusterable_embedding, min_cluster_size, viz_embedding_list\n):\n print(f\"min_cluster size: {min_cluster_size}\")\n clusterer = hdbscan.HDBSCAN(\n min_cluster_size=min_cluster_size, prediction_data=True\n ).fit(clusterable_embedding)\n labels = hdbscan.HDBSCAN(min_cluster_size=min_cluster_size,).fit_predict(\n clusterable_embedding\n )\n print(f\"found {len(np.unique(labels))} clusters\")\n clustered = labels >= 0\n print(f\"fraction clustered: {np.sum(clustered)/labels.shape[0]}\")\n for embedding in viz_embedding_list:\n plt.scatter(\n embedding[~clustered][:, 0],\n embedding[~clustered][:, 1],\n c=(0.5, 0.5, 0.5),\n s=10,\n alpha=0.5,\n )\n plt.scatter(\n embedding[clustered][:, 0],\n embedding[clustered][:, 1],\n c=labels[clustered],\n s=10,\n cmap=\"Spectral\",\n )\n plt.legend(labels)\n plt.show()\n\n return labels, clusterer", "def test_read_cluster_policy(self):\n pass", "def createInstance(ec2,ami,nb_nodes,placement,instance_type,key,sg,user_data=None):\n\n reservation = ec2.run_instances(ami,min_count=nb_nodes,max_count=nb_nodes,placement = placement,key_name=key,security_groups=[sg],instance_type=instance_type,user_data=user_data)\n instance = reservation.instances[0]\n return instance", "def run(ceph_cluster, **kw):\n log.info(run.__doc__)\n config = kw[\"config\"]\n cephadm = CephAdmin(cluster=ceph_cluster, **config)\n rados_object = RadosOrchestrator(node=cephadm)\n mon_obj = MonConfigMethods(rados_obj=rados_object)\n ceph_nodes = kw.get(\"ceph_nodes\")\n osd_list = []\n total_osd_app_mem = {}\n\n for node in ceph_nodes:\n if node.role == \"osd\":\n node_osds = rados_object.collect_osd_daemon_ids(node)\n osd_list = osd_list + node_osds\n\n target_configs = config[\"cache_trim_max_skip_pinned\"][\"configurations\"]\n max_skip_pinned_value = int(\n mon_obj.get_config(section=\"osd\", param=\"bluestore_cache_trim_max_skip_pinned\")\n )\n\n # Check the default value of the bluestore_cache_trim_max_skip_pinned value\n if max_skip_pinned_value != 1000:\n log.error(\n \"The default value of bluestore_cache_trim_max_skip_pinned not equal to 1000\"\n )\n raise Exception(\n \"The default value of bluestore_cache_trim_max_skip_pinned is not 1000\"\n )\n\n # Creating pools and starting the test\n for entry in target_configs.values():\n log.debug(\n f\"Creating {entry['pool_type']} pool on the cluster with name {entry['pool_name']}\"\n )\n if entry.get(\"pool_type\", \"replicated\") == \"erasure\":\n method_should_succeed(\n rados_object.create_erasure_pool, name=entry[\"pool_name\"], **entry\n )\n else:\n method_should_succeed(\n rados_object.create_pool,\n **entry,\n )\n\n if not rados_object.bench_write(**entry):\n log.error(\"Failed to write objects into the EC Pool\")\n return 1\n rados_object.bench_read(**entry)\n log.info(\"Finished writing data into the pool\")\n\n # 
performing scrub and deep-scrub\n rados_object.run_scrub()\n rados_object.run_deep_scrub()\n time.sleep(10)\n\n rados_object.change_heap_profiler_state(osd_list, \"start\")\n # Executing tests for 45 minutes\n time_execution = datetime.datetime.now() + datetime.timedelta(minutes=45)\n while datetime.datetime.now() < time_execution:\n # Get all OSDs heap dump\n heap_dump = rados_object.get_heap_dump(osd_list)\n # get the osd application used memory\n osd_app_mem = get_bytes_used_by_app(heap_dump)\n total_osd_app_mem = mergeDictionary(total_osd_app_mem, osd_app_mem)\n # wait for 10 seconds and collecting the memory\n time.sleep(10)\n for osd_id, mem_list in total_osd_app_mem.items():\n mem_growth = is_what_percent_mem(mem_list)\n if mem_growth > 80:\n log.error(\n f\"The osd.{osd_id} consuming more memory with the relative memory growth {mem_growth}\"\n )\n raise Exception(\"No warning generated by PG Autoscaler\")\n log.info(f\"The relative memory growth for the osd.{osd_id} is {mem_growth} \")\n\n rados_object.change_heap_profiler_state(osd_list, \"stop\")\n\n # check fo the crashes in the cluster\n crash_list = rados_object.do_crash_ls()\n if not crash_list:\n return 0\n else:\n return 1", "def show_cluster_status(self, *args, **kwargs):\r\n return execute(self._show_cluster_status, *args, **kwargs)", "def clustering(dataset, logger):\n all_instances = dataset\n meta_dataset = collections.defaultdict(list)\n for instance in all_instances:\n meta_dataset[instance['label']].append(instance['coordinate'])\n\n tasklist = map(\n lambda item, meta_dataset=meta_dataset, logger=logger: (\n item[0],\n clustering_by_label,\n (item[1], item[0], meta_dataset, logger)), meta_dataset.items())\n\n # pool = multiprocessing.pool.Pool(PROCESS_COUNT)\n # clusters = dict(pool.map(map_generate_tuple, tasklist))\n clusters = dict(map(map_generate_tuple, tasklist))\n # pool.close()\n # pool.join()\n\n return clusters", "def cluster_amazon_video_game() -> Dict:\n return dict(model=None, score=None, clusters=None)", "def cluster(self):\n return self._cluster", "def cluster(self):\n return self._cluster", "def find_clusters():\n clusters = ecs_client.list_clusters()['clusterArns']\n logging.debug(\"\")\n logging.debug(\"************************************************************\")\n logging.debug(\"Retrieved %i clusters\" % (len(clusters)))\n for cluster in clusters:\n ratio = SequenceMatcher(\n lambda item:\n item == \" \",\n \"arn:aws:ecs:us-east-1*cluster/default\",\n cluster\n ).ratio()\n if ratio < 0.82:\n cluster_short = cluster.split(\"/\")[1]\n if args.cluster and cluster_short != args.cluster:\n continue\n ecs_data[cluster_short] = {}\n logging.debug(\"Cluster: %s\" % (cluster))\n instance_arns = ecs_client.list_container_instances(\n cluster=cluster\n )['containerInstanceArns']\n instances = ecs_client.describe_container_instances(\n cluster=cluster,\n containerInstances=instance_arns\n )['containerInstances']\n logging.debug(\"Retrieved %i cluster instances\" % (len(instances)))\n for instance in instances:\n ecs_data[cluster_short][instance['ec2InstanceId']] = {\n 'instance_id': instance['ec2InstanceId'],\n 'cluster': cluster_short,\n 'containers': []\n }\n logging.debug(\"\\tLooking for tasks in (%s): %s %s\" % (instance_data[instance['ec2InstanceId']]['name'], instance_data[instance['ec2InstanceId']]['id'], instance['containerInstanceArn']))\n tasks = ecs_client.list_tasks(\n cluster=cluster,\n containerInstance=instance['containerInstanceArn'],\n )['taskArns']\n logging.debug(\"Retrieved %i 
cluster tasks\" % (len(tasks)))\n for task in tasks:\n containers = ecs_client.describe_tasks(\n cluster=cluster,\n tasks=[task]\n )['tasks']\n for container in containers:\n if args.action != \"list\":\n if container['taskDefinitionArn'].split(\"/\")[1].split(\":\")[0] == args.task:\n if args.action == \"ssh\":\n if args.random:\n hosts.append(instance['ec2InstanceId'])\n else:\n logging.debug(\"sshing to %s\" % (instance['ec2InstanceId']))\n print('*** Initiating Host Interactive Session\\n')\n interactive().connect(instance_data[instance['ec2InstanceId']]['private_ip'],'')\n sys.exit(0)\n if args.action == \"enter\":\n if args.random:\n logging.debug(\"Recording host %s for random selection\" % (instance['ec2InstanceId']))\n hosts.append(instance['ec2InstanceId'])\n else:\n logging.debug(\"connect to %s -> %s\" % (instance['ec2InstanceId'],container['taskDefinitionArn'].split(\"/\")[1].split(\":\")[0]))\n print '*** Initiating Container Interactive Session\\n'\n interactive().docker_enter(args.user, instance_data[instance['ec2InstanceId']]['private_ip'],args.task)\n sys.exit(0)\n if args.action == \"list\":\n logging.debug(\"%s matched arg(%s): %s\" % (container['taskDefinitionArn'].split(\"/\")[1].split(\":\")[0], args.action, instance['ec2InstanceId']))\n ecs_data[cluster_short][instance['ec2InstanceId']]['containers'].append(container['taskDefinitionArn'].split(\"/\")[1].split(\":\")[0])\n # logging.info(\"%s:%s\" % (container['taskDefinitionArn'].split(\"/\")[1].split(\":\")[0], args.task))\n return True", "def is_distributed(args: dict) -> bool:\n\n return args.local_rank != -1", "def is_distributed() -> NotImplementedError:\n raise NotImplementedError()", "def count_all_cluster_instances(cluster_name, predictive=False, exclude_node_label_keys=app_config[\"EXCLUDE_NODE_LABEL_KEYS\"]):\n\n # Get the K8s nodes on the cluster, while excluding nodes with certain label keys\n k8s_nodes = get_k8s_nodes(exclude_node_label_keys)\n\n count = 0\n asgs = get_all_asgs(cluster_name)\n for asg in asgs:\n instances = asg['Instances']\n if predictive:\n count += asg['DesiredCapacity']\n else:\n # Use the get_node_by_instance_id() function as it only returns the node if it is not excluded by K8s labels\n for instance in instances:\n instance_id = instance['InstanceId']\n try:\n get_node_by_instance_id(k8s_nodes, instance_id)\n count += 1\n except Exception:\n logger.info(\"Skipping instance {}\".format(instance_id))\n logger.info(\"{} asg instance count in cluster is: {}. 
K8s node count should match this number\".format(\"*** Predicted\" if predictive else \"Current\", count))\n return count", "def _GetShardedBatch() -> tf.types.experimental.distributed.PerReplica:\n per_host_batches: List[py_utils.NestedMap] = []\n # Note: `available_devices` omits the executor host; just those with TPUs.\n for host_device in py_utils.Flatten(\n cluster_factory.Current().available_devices.tolist()\n ):\n with tf.device(host_device):\n batch = self.task.input.GetPreprocessedInputBatch()\n\n # Remove bucket_keys; this relates to GenericInput pipelines.\n batch = batch.FilterKeyVal(lambda k, _: not k.endswith('bucket_keys'))\n\n # Process embedding ID features according to their specified types.\n batch = batch.TransformWithKey(\n tpu_embedding_layers_v2.TPU_EMBEDDING_MANAGER.ProcessInputFeature\n )\n\n per_host_batches.extend(Split(batch, replicas_per_host))\n\n return strategy.experimental_distribute_values_from_function(\n lambda ctx: per_host_batches[ctx.replica_id_in_sync_group]\n )", "def show_cluster(name: str) -> Cluster:\n environment = EnvironmentProvider().environment\n return environment.clusters[name]", "def _Delete(self):\n cmd = self.cmd_prefix + [\n 'redshift', 'delete-cluster-subnet-group',\n '--cluster-subnet-group-name', self.name\n ]\n vm_util.IssueCommand(cmd, raise_on_failure=False)", "def run_instance():\n ami_id = \"ami-04876f29fd3a5e8ba\" # AMI Id\n instance_type = \"t2.micro\" # Instance Type\n tag_specs = [\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': 'Name',\n 'Value': 'BoB10@ProductDev.sdk.8084'\n }\n ]\n }\n ]\n\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Run an instance\n\n instances = ec2_resource.create_instances(ImageId=ami_id, InstanceType=instance_type,\n MaxCount=1, MinCount=1, KeyName='BoB10@ProductDev.8084',\n TagSpecifications=tag_specs, \n SecurityGroupIds=['sg-06b757b4bb272d98f'],\n UserData=assemble_userdata().as_string())\n Instance_id = instances[0].id\n print('\\nInstance Id: ' + Instance_id)\n print('Image Id: ' + instances[0].image_id)\n print('Instance Type: ' + instances[0].instance_type)\n print('State: ' + instances[0].state['Name'])\n return Instance_id", "def cluster_enabled(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"cluster_enabled\")", "def instance_action():\n data = check_args(\n ('cloudProvider', 'apiKey', 'secretKey', 'instanceAction',\n 'instanceId')\n )\n job = jobs.instance_action.apply_async(args=(data,))\n return make_response(job_id=job.id)", "def test_update_hyperflex_cluster(self):\n pass", "def cluster_start(r):\n cluster_id = request_get(r, \"cluster_id\")\n if not cluster_id:\n logger.warning(\"No cluster_id is given\")\n return make_fail_response(\"No cluster_id is given\")\n if cluster_handler.start(cluster_id):\n return jsonify(response_ok), CODE_OK\n\n return make_fail_response(\"cluster start failed\")", "def configure_cluster(ctx, zone, db_instance_name):\n ctx.run(init_pg_servers_play_run(zone, db_instance_name), pty=True, echo=True)", "def find_cluster(self, id):\n raise NotImplementedError", "def simulated_cluster(n_stars=CLUSTER_DEFAULTS['stars'],\n dimensions=CLUSTER_DEFAULTS['dimensions']):\n\n nx, ny = dimensions\n\n # Create empty image\n image = np.zeros((ny, nx))\n\n # Generate random positions\n r = np.random.random(n_stars) * nx\n theta = np.random.uniform(0., 2. 
* np.pi, n_stars)\n\n # Generate random fluxes\n fluxes = np.random.random(n_stars) ** 2\n\n # Compute position\n x = nx / 2 + r * np.cos(theta)\n y = ny / 2 + r * np.sin(theta)\n\n # Add stars to image\n # ==> First for loop and if statement <==\n for idx in range(n_stars):\n if x[idx] >= 0 and x[idx] < nx and y[idx] >= 0 and y[idx] < ny:\n image[y[idx], x[idx]] += fluxes[idx]\n\n # Convolve with a gaussian\n image = gaussian_filter(image, sigma=1)\n\n # Add noise\n image += np.random.normal(1., 0.001, image.shape)\n\n return image", "def quick_instance(self, name, image, instance_type, env_tag='dev', zone_tag='starwatts', os_tag='debian', sg_id=None,\n private=True, extra_sg_ids=None, extra_tags=None, terminate_on_shutdown=False,\n debug=False):\n # Debug setting\n if debug:\n logging.basicConfig(level=logging.DEBUG)\n\n # Preliminary tests\n try:\n ami = self.get_image(image_id=image)\n except EC2ResponseError:\n logging.error(\"The image {} could not be found. Aborting.\".format(image))\n return\n print(\"Using AMI {} : {}\".format(image, ami.name))\n if len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'running'})) > 0 or \\\n len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'stopped'})) > 0:\n logging.error(\"An instance with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No instance has the same 'name' tag.\")\n if self.keypair_exists(name):\n logging.error(\"A keypair with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No keypair was found with the same name.\")\n if sg_id is None:\n if self.security_group_exists(name=name):\n logging.error(\"A security group with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No security group was found with the same name.\")\n\n # Tags generation\n logging.debug(\"Generating tags to apply.\")\n tags = dict(name=name, os=os_tag, env=env_tag, zone=zone_tag, privacy='true' if private else 'false')\n if extra_tags is not None:\n tags.update(extra_tags)\n print(\"Tags : {}\".format(tags))\n\n # Fetching needed security groups (bastion and zabbix)\n standard_sg = self.get_all_security_groups(groupnames=['standard'])\n if len(standard_sg) != 1:\n logging.error(\"Multiple or no security group was found for the 'bastion' search. 
Aborting.\")\n return\n standard_sg = standard_sg[0]\n logging.debug(\"The following security group was found for 'standard : {} {}\".format(standard_sg.id,\n standard_sg.description))\n\n # Security group creation\n if sg_id is None:\n sg = self.create_security_group(name, \"SG applied to {} VM\".format(name))\n sg_id = sg.id\n\n sg_ids = [sg_id, standard_sg.id, ]\n # Using the extra security groups if any\n if extra_sg_ids is not None:\n logging.debug(\"Extra security groups to add : {}\".format(extra_sg_ids))\n sg_ids.extend(extra_sg_ids)\n logging.debug(\"Security Groups : {}\".format(sg_ids))\n\n user_data = \"-----BEGIN OUTSCALE SECTION-----\\nprivate_only=true\\n-----END OUTSCALE SECTION-----\" if private else \"\"\n logging.debug(\"Creating keypair.\")\n kp = self.create_key_pair(key_name=name)\n fp = os.path.join(os.path.expanduser('~/.ssh'), '%s.pem' % kp.name)\n with open(fp, 'wb') as fd:\n fd.write(bytes(kp.material, \"UTF-8\"))\n logging.debug(\"Keypair written to ~/.ssh/{}.pem\".format(name))\n\n resa = self.run_instances(image_id=image, key_name=name, security_groups=sg_ids, instance_type=instance_type,\n user_data=user_data,\n instance_initiated_shutdown_behavior='terminate' if terminate_on_shutdown else 'stop')\n inst = resa.instances[0]\n logging.debug(\"Adding tags to the newly created machine.\")\n inst.add_tags(tags)\n return inst", "def on_public_cluster(self) -> bool:\n return not self.on_private_cluster" ]
[ "0.62730116", "0.5638975", "0.5505993", "0.5474679", "0.5416399", "0.540477", "0.53883356", "0.5373748", "0.5364157", "0.5346423", "0.533336", "0.5286241", "0.52805185", "0.52731586", "0.526392", "0.5225001", "0.52148056", "0.5214644", "0.52102435", "0.51972777", "0.51717037", "0.516918", "0.5163223", "0.5153653", "0.5148468", "0.51441205", "0.51432306", "0.5140414", "0.5117445", "0.5110743", "0.51106936", "0.51076955", "0.5100692", "0.50926167", "0.50922644", "0.50867045", "0.5076853", "0.5067921", "0.50656027", "0.5064113", "0.5062118", "0.5029349", "0.5028662", "0.5007443", "0.5002565", "0.49904776", "0.498782", "0.49856204", "0.49795344", "0.49658036", "0.49558887", "0.49533403", "0.4939127", "0.49367672", "0.4928445", "0.4922915", "0.49198923", "0.49128005", "0.48971844", "0.4887591", "0.48871034", "0.4885436", "0.48799372", "0.48791263", "0.48788887", "0.4871592", "0.48700768", "0.48665884", "0.48582208", "0.48555335", "0.48535475", "0.4847886", "0.4843657", "0.48428187", "0.48341644", "0.48295778", "0.48220143", "0.48204297", "0.48203462", "0.48197788", "0.48180035", "0.48175037", "0.480513", "0.480513", "0.48012695", "0.48006633", "0.4795675", "0.47945547", "0.47908092", "0.47903603", "0.47898173", "0.47844145", "0.47821182", "0.47812313", "0.47744507", "0.4768171", "0.47614446", "0.4760131", "0.47542343", "0.4750937", "0.47501296" ]
0.0
-1
This operation supports sharded cluster instances only.
async def describe_sharding_network_address_async(
    self,
    request: dds_20151201_models.DescribeShardingNetworkAddressRequest,
) -> dds_20151201_models.DescribeShardingNetworkAddressResponse:
    runtime = util_models.RuntimeOptions()
    return await self.describe_sharding_network_address_with_options_async(request, runtime)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cluster(self):\n assert False", "def cluster_myshardid(self, target_nodes=None):\n return self.execute_command(\"CLUSTER MYSHARDID\", target_nodes=target_nodes)", "def delete_cluster(self):", "def run_cluster(autoscaling: bool = False, **options) -> None:\n if autoscaling:\n thread = AutoScalingCluster.new(**options)\n else:\n thread = RemoteCluster.new(**options)\n try:\n thread.join()\n except Exception:\n thread.stop()\n raise", "def create_redshift_cluster(config, redshift_role):\n redshift = create_boto3_client(config, 'redshift')\n cluster_identifier = config.get('CLUSTER', 'CLUSTER_IDENTIFIER')\n print(\"Creating redshift cluster: %s\" % cluster_identifier)\n try:\n cc_response = redshift.create_cluster(\n MasterUsername=config.get('CLUSTER', 'DB_USER'),\n MasterUserPassword=config.get('CLUSTER', 'DB_PASSWORD'),\n ClusterIdentifier=cluster_identifier,\n NodeType=config.get('CLUSTER', 'NODE_TYPE'),\n NumberOfNodes=int(config.get('CLUSTER', 'NODE_COUNT')),\n Port=int(config.get('CLUSTER', 'DB_PORT')),\n IamRoles=[\n redshift_role['Role']['Arn']\n ],\n ClusterSubnetGroupName=config.get('CLUSTER', 'SUBNET_GROUP'),\n ClusterSecurityGroups=[config.get('CLUSTER', 'SECURITY_GROUP_ID')]\n )\n print('Creating Cluster:', cc_response)\n except ClientError as e:\n if e.response['Error']['Code'] == 'ClusterAlreadyExists':\n print(\"Cluster %s already exists\" % cluster_identifier)\n return\n else:\n print(\"Unexpected error wile creating cluster: %s\" % e)\n\n print('Sleep 5 seconds')\n time.sleep(5)\n while True:\n print('Fetching status of cluster..')\n try:\n cluster_status = get_cluster_status(redshift, cluster_identifier)\n if cluster_status['Clusters'][0]['ClusterStatus'] == 'available':\n break\n print('Cluster Status:', cluster_status)\n except ClientError as e:\n print(\"Unexpected error wile getting cluster status: %s\" % e)\n raise e\n print('Sleep 10 seconds')\n time.sleep(10)\n print('Cluster is created and available.')", "def dvs_instances_batch_mix_sg(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n self.show_step(2)\n net_1 = os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network is created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(3)\n self.show_step(4)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n self.icmp[\"security_group_rule\"][\"security_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"remote_group_id\"] = sg1.id\n 
self.icmp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n self.icmp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n\n for sg in [sg1, sg2]:\n self.tcp[\"security_group_rule\"][\"security_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"remote_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n self.tcp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n\n # Add rules for ssh and ping\n os_conn.goodbye_security()\n _s_groups = os_conn.neutron.list_security_groups()['security_groups']\n _srv_tenant_id = os_conn.get_tenant(SERVTEST_TENANT).id\n default_sg = [sg for sg in _s_groups\n if sg['tenant_id'] == _srv_tenant_id and\n sg['name'] == 'default'][0]\n\n for step in (5, 9):\n self.show_step(step) # step 5, 9\n step += 1\n self.show_step(step) # step 6, 10\n step += 1\n\n openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=vm_count,\n security_groups=[sg1.name, sg2.name, default_sg['name']])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(step) # step 7, 11\n step += 1\n\n instances = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, instances)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair,\n timeout=60 * 5,\n interval=10)\n\n self.show_step(step) # step 8, 12\n step += 1\n\n for instance in instances:\n os_conn.nova.servers.delete(instance)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(instance)", "def run_clean_cluster(red_df, remove_df=None):\n cluster_df = cluster.get_cluster_output_df(red_df, False, quantile=0.8)\n return run_split_cluster(cluster_df, remove_df)", "def _create_cluster(self, server_instance):\n return Cluster([server_instance])", "def launch_cluster(params):\n logging.info('Launching cluster of size: {} and type: {}'.format(params.cluster_size, params.instance_type))\n subprocess.check_call(['cgcloud',\n 'create-cluster',\n '--leader-instance-type', 'm3.medium',\n '--instance-type', params.instance_type,\n '--share', params.shared_dir,\n '--num-workers', str(params.cluster_size),\n '-c', params.cluster_name,\n '--spot-bid', str(params.spot_price),\n '--leader-on-demand',\n '--ssh-opts',\n '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no',\n 'toil'])", "def test_patch_cluster_role(self):\n pass", "def recluster(cluster, min_size, guard, func):\r\n if cluster.get_length() == 0:\r\n return\r\n if cluster.get_length() <= min_size:\r\n return cluster\r\n sim = func(cluster.get_tweets())\r\n if sim < guard:\r\n kmeans = TweetKMeans(2)\r\n kmeans.set_data(cluster.get_tweets())\r\n return kmeans.start_algorithm()\r\n return cluster", "def launch_cluster(\n descs: List[MachineDesc],\n *,\n nonce: Optional[str] = None,\n key_name: Optional[str] = None,\n security_group_name: str = DEFAULT_SECURITY_GROUP_NAME,\n instance_profile: Optional[str] = DEFAULT_INSTANCE_PROFILE_NAME,\n extra_tags: Dict[str, str] = {},\n delete_after: datetime.datetime,\n git_rev: str = \"HEAD\",\n extra_env: Dict[str, str] = {},\n) -> List[Instance]:\n\n if not nonce:\n nonce = util.nonce(8)\n\n instances = [\n launch(\n key_name=key_name,\n instance_type=d.instance_type,\n ami=d.ami,\n 
ami_user=d.ami_user,\n tags={**d.tags, **extra_tags},\n display_name=f\"{nonce}-{d.name}\",\n size_gb=d.size_gb,\n security_group_name=security_group_name,\n instance_profile=instance_profile,\n nonce=nonce,\n delete_after=delete_after,\n )\n for d in descs\n ]\n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(\n asyncio.gather(\n *(\n setup(i, git_rev if d.checkout else \"HEAD\")\n for (i, d) in zip(instances, descs)\n )\n )\n )\n\n hosts_str = \"\".join(\n (f\"{i.private_ip_address}\\t{d.name}\\n\" for (i, d) in zip(instances, descs))\n )\n for i in instances:\n mssh(i, \"sudo tee -a /etc/hosts\", input=hosts_str.encode())\n\n env = \" \".join(f\"{k}={shlex.quote(v)}\" for k, v in extra_env.items())\n for (i, d) in zip(instances, descs):\n if d.launch_script:\n mssh(\n i,\n f\"(cd materialize && {env} nohup bash -c {shlex.quote(d.launch_script)}) &> mzscratch.log &\",\n )\n\n return instances", "def resize_cluster(ctx, project_name, cluster_name, instance_size_name):\n project = ctx.obj.groups.byName[project_name].get().data\n\n new_cluster_config = {\n 'clusterType': 'REPLICASET',\n 'providerSettings': {\n 'providerName': 'AWS',\n 'regionName': 'US_WEST_1',\n 'instanceSizeName': instance_size_name}}\n\n cluster = ctx.obj.groups[project.id].clusters[cluster_name].patch(\n **new_cluster_config)\n pprint(cluster.data)", "def test_replace_cluster_role(self):\n pass", "def running_cluster(request):\n cluster_name = 'running-cluster-' + random_string()\n launch_cluster(\n cluster_name=cluster_name,\n instance_type=request.param.instance_type,\n spark_version=request.param.spark_version,\n spark_git_commit=request.param.spark_git_commit)\n\n if request.param.restarted:\n stop_cluster(cluster_name)\n start_cluster(cluster_name)\n\n def destroy():\n p = subprocess.run([\n 'flintrock', 'destroy', cluster_name, '--assume-yes'])\n assert p.returncode == 0\n request.addfinalizer(destroy)\n\n return cluster_name", "def create_cluster(ctx, project_name, cluster_name, instance_size_name):\n project = ctx.obj.groups.byName[project_name].get().data\n\n cluster_config = {\n 'name': cluster_name,\n 'clusterType': 'REPLICASET',\n 'providerSettings': {\n 'providerName': 'AWS',\n 'regionName': 'US_WEST_1',\n 'instanceSizeName': instance_size_name}}\n\n cluster = ctx.obj.groups[project.id].clusters.post(**cluster_config)\n pprint(cluster.data)", "def test_create_cluster_role(self):\n pass", "def _get_cluster_list(self):\n return self.__cluster_list", "def cluster_shards(self, target_nodes=None):\n return self.execute_command(\"CLUSTER SHARDS\", target_nodes=target_nodes)", "def dvs_instances_one_group(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(2)\n self.show_step(3)\n openstack.create_instances(os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n vm_count=vm_count,\n security_groups=[security_group.name])\n 
openstack.verify_instance_state(os_conn)\n\n self.show_step(4)\n srv_list = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, srv_list)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair)\n\n self.show_step(5)\n for srv in srv_list:\n os_conn.nova.servers.delete(srv)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(srv)", "def atlas_clusters():\n pass", "def resource_type(self):\n return 'cluster'", "def create_cache_cluster(stack, name, cache_type, vpc, cidrs, subnet_ids,\n instance_type, num_cache_clusters):\n ports = {'redis': 6379, 'memcached': 11211}\n ingress = []\n\n for idx, cidr in enumerate(cidrs):\n ingress.append(\n SecurityGroupRule(\n '{0}{1}{2}'.format(name.replace('-', ''), cache_type, idx),\n CidrIp=cidr,\n FromPort=ports[cache_type],\n ToPort=ports[cache_type],\n IpProtocol='tcp',\n ))\n\n secgroup = stack.stack.add_resource(\n SecurityGroup(\n '{0}{1}SecurityGroup'.format(name.replace('-', ''), cache_type),\n GroupDescription='{0} {1} Security Group'.format(name, cache_type),\n SecurityGroupIngress=ingress,\n SecurityGroupEgress=[\n SecurityGroupRule(\n '{0}egress'.format(name.replace('-', '')),\n CidrIp='0.0.0.0/0',\n IpProtocol='-1')\n ],\n VpcId=vpc,\n ))\n\n subnet_group = stack.stack.add_resource(\n elasticache.SubnetGroup(\n '{0}{1}cache'.format(name.replace('-', ''), cache_type),\n Description='{0}{1} cache'.format(name, cache_type),\n SubnetIds=subnet_ids,\n ))\n\n if num_cache_clusters > 1:\n stack.stack.add_resource(\n elasticache.ReplicationGroup(\n '{0}CacheCluster'.format(name.replace('-', '')),\n ReplicationGroupId='{0}'.format(name),\n ReplicationGroupDescription='{0}cluster'.format(name),\n Engine='{0}'.format(cache_type),\n EngineVersion='3.2.6',\n CacheNodeType=instance_type,\n NumCacheClusters=num_cache_clusters,\n CacheSubnetGroupName=Ref(subnet_group),\n SecurityGroupIds=[Ref(secgroup)],\n AtRestEncryptionEnabled=True))\n else:\n stack.stack.add_resource(\n elasticache.CacheCluster(\n '{0}CacheCluster'.format(name.replace('-', '')),\n ClusterName='{0}'.format(name),\n Engine='{0}'.format(cache_type),\n EngineVersion='3.2.10',\n CacheNodeType=instance_type,\n NumCacheNodes=num_cache_clusters,\n VpcSecurityGroupIds=[Ref(secgroup)],\n CacheSubnetGroupName=Ref(subnet_group)))", "def cluster_by_partitioning(active_sites):\n cls, sc = k_means(active_sites)\n\n return cls", "def test_replace_cluster_resource_quota(self):\n pass", "def launch_cluster(**overrides) -> dict:\n if os.path.isfile(META_FILE):\n raise FileExistsError(\"Cluster already exists!\")\n\n config = DEFAULT_CONFIG.copy()\n config.update(**overrides)\n\n sg = make_sg()\n config[\"Instances\"].update(AdditionalMasterSecurityGroups=[sg.id])\n emr = get_emr_client()\n\n response = emr.run_job_flow(**config)\n cluster_id = response[\"JobFlowId\"]\n master_addr = wait_init(cluster_id)\n\n meta = {\n \"MasterNodeAddr\": master_addr,\n \"ClusterId\": cluster_id,\n \"SGId\": sg.id\n }\n with open(META_FILE, \"w\") as f:\n json.dump(meta, f)\n\n print(\"INFO: Cluster Launched!\")\n return meta", "def startCluster():\n # attempt to create a cluster\n print(\"Creating a Redshift cluster...\")\n try:\n redshift.create_cluster(\n\n # hardware parameters\n ClusterType=DWH_CLUSTER_TYPE,\n NodeType=DWH_NODE_TYPE,\n NumberOfNodes=int(DWH_NUM_NODES),\n\n # database access configuration\n DBName=DWH_DB,\n 
ClusterIdentifier=DWH_CLUSTER_IDENTIFIER,\n MasterUsername=DWH_DB_USER,\n MasterUserPassword=DWH_DB_PASSWORD,\n\n # accesses\n IamRoles=[iam.get_role(RoleName=DWH_IAM_ROLE_NAME)[\"Role\"][\"Arn\"]]\n )\n except Exception as e:\n print(e)\n return\n\n # wait for cluster to spin up\n print(\"Waiting for cluster to be available...\")\n while redshift.describe_clusters(\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER\n )[\"Clusters\"][0][\"ClusterStatus\"] != \"available\":\n time.sleep(30)\n print(\"\\tChecking status again...\")", "def remove_cluster(config, nova, neutron, cinder, conn):\n\n cluster_info = OSClusterInfo(nova, neutron, cinder, config, conn)\n masters = cluster_info.get_instances(\"node\")\n workers = cluster_info.get_instances(\"master\")\n\n tasks = [host.delete(neutron) for host in masters if host]\n tasks += [host.delete(neutron) for host in workers if host]\n if tasks:\n LOGGER.debug(\"Deleting Instances ...\")\n loop = asyncio.get_event_loop()\n loop.run_until_complete(asyncio.wait(tasks))\n loop.close()\n\n LoadBalancer(config, conn).delete()\n\n sg_name = '%s-sec-group' % config['cluster-name']\n secg = conn.list_security_groups({\"name\": sg_name})\n if secg:\n LOGGER.debug(\"Deleting SecurityGroup %s ...\", sg_name)\n for sg in secg:\n for rule in sg.security_group_rules:\n conn.delete_security_group_rule(rule['id'])\n\n for port in conn.list_ports():\n if sg.id in port.security_groups:\n conn.delete_port(port.id)\n conn.delete_security_group(sg_name)\n\n # This needs to be replaced with OpenStackAPI in the future\n for vol in cinder.volumes.list():\n try:\n if config['cluster-name'] in vol.name and vol.status != 'in-use':\n try:\n vol.delete()\n except (BadRequest, NotFound):\n pass\n\n except TypeError:\n continue\n\n # delete the cluster key pair\n conn.delete_keypair(config['cluster-name'])", "def cluster_timeseries(X, roi_mask_nparray, n_clusters, similarity_metric, affinity_threshold, cluster_method = 'ward'):\n \n import sklearn as sk\n from sklearn import cluster, datasets, preprocessing\n import scipy as sp\n import time \n from sklearn.cluster import FeatureAgglomeration\n from sklearn.feature_extraction import image\n\n \n print('Beginning Calculating pairwise distances between voxels')\n \n X = np.array(X)\n X_dist = sp.spatial.distance.pdist(X.T, metric = similarity_metric)\n \n temp=X_dist\n temp[np.isnan(temp)]=0\n tempmax=temp.max()\n \n X_dist = sp.spatial.distance.squareform(X_dist)\n X_dist[np.isnan(X_dist)]=tempmax\n #import pdb;pdb.set_trace()\n sim_matrix=1-sk.preprocessing.normalize(X_dist, norm='max')\n sim_matrix[sim_matrix<affinity_threshold]=0\n #import pdb;pdb.set_trace()\n if cluster_method == 'ward':\n # ## BEGIN WARD CLUSTERING CODE \n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n if roi_mask_nparray!='empty':\n #import pdb; pdb.set_trace()\n shape = roi_mask_nparray.shape\n connectivity = image.grid_to_graph(n_x=shape[0], n_y=shape[1],\n n_z=shape[2], mask=roi_mask_nparray)\n \n ward = FeatureAgglomeration(n_clusters=n_clusters, connectivity=connectivity,\n linkage='ward')\n ward.fit(sim_matrix)\n y_pred = ward.labels_.astype(np.int)\n else:\n print(\"Calculating Hierarchical Clustering\")\n ward = FeatureAgglomeration(n_clusters=n_clusters, affinity='euclidean', linkage='ward') \n ward.fit(sim_matrix)\n y_pred = ward.labels_.astype(np.int)\n \n# # END WARD CLUSTERING CODE \n else:\n \n print(\"spectral\")\n print(\"spectral\")\n 
print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n #cluster_method== 'spectral':\n #Spectral method\n spectral = cluster.SpectralClustering(n_clusters, eigen_solver='arpack', random_state = 5, affinity=\"precomputed\", assign_labels='discretize') \n spectral.fit(sim_matrix)\n y_pred = spectral.labels_.astype(np.int) \n\n# \n # BEGIN SPECTRAL CLUSTERING CODE \n \n # END SPECTRAL CLUSTERING CODE \n\n\n\n return y_pred", "def test_patch_hyperflex_cluster(self):\n pass", "def create_cluster(df,validate, test, X, k, name):\n \n scaler = StandardScaler(copy=True).fit(df[X])\n X_scaled = pd.DataFrame(scaler.transform(df[X]), columns=df[X].columns.values).set_index([df[X].index.values])\n kmeans = KMeans(n_clusters = k, random_state = 42)\n kmeans.fit(X_scaled)\n kmeans.predict(X_scaled)\n df[name] = kmeans.predict(X_scaled)\n df[name] = 'cluster_' + df[name].astype(str)\n \n v_scaled = pd.DataFrame(scaler.transform(validate[X]), columns=validate[X].columns.values).set_index([validate[X].index.values])\n validate[name] = kmeans.predict(v_scaled)\n validate[name] = 'cluster_' + validate[name].astype(str)\n \n t_scaled = pd.DataFrame(scaler.transform(test[X]), columns=test[X].columns.values).set_index([test[X].index.values])\n test[name] = kmeans.predict(t_scaled)\n test[name] = 'cluster_' + test[name].astype(str)\n \n centroids = pd.DataFrame(scaler.inverse_transform(kmeans.cluster_centers_), columns=X_scaled.columns)\n return df, X_scaled, scaler, kmeans, centroids", "def test_read_cluster_role(self):\n pass", "def main():\n rs = redshift(config_file=CONFIG_FILENAME)\n \n # check if cluster already available\n try:\n clust_avail = check_available(rs)\n except rs_client.exceptions.ClusterNotFoundFault:\n clust_avail = False\n\n # if cluster not available, create it\n if not clust_avail:\n create_cluster(rs) \n \n print(f'Cluster is available. 
Cluster information: \\n{rs.get_cluster_info()}')", "def _load_cluster(self):", "def _setup_cluster(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "def cross_cluster_timeseries(data1, data2, roi_mask_nparray, n_clusters, similarity_metric, affinity_threshold, cluster_method = 'ward'):\n \n \n \n import scipy as sp\n import time\n import sklearn as sk\n from sklearn import cluster, datasets, preprocessing\n from sklearn.cluster import FeatureAgglomeration\n from sklearn.feature_extraction import image\n\n \n \n print(\"Calculating Cross-clustering\")\n print(\"Calculating pairwise distances between areas\")\n \n dist_btwn_data_1_2 = np.array(sp.spatial.distance.cdist(data1.T, data2.T, metric = similarity_metric))\n sim_btwn_data_1_2=1-dist_btwn_data_1_2\n sim_btwn_data_1_2[np.isnan(sim_btwn_data_1_2)]=0\n sim_btwn_data_1_2[sim_btwn_data_1_2<affinity_threshold]=0\n\n print(\"Calculating pairwise distances between voxels in ROI 1 \")\n dist_of_1 = sp.spatial.distance.pdist(sim_btwn_data_1_2, metric = 'euclidean')\n dist_matrix = sp.spatial.distance.squareform(dist_of_1)\n sim_matrix=1-sk.preprocessing.normalize(dist_matrix, norm='max')\n sim_matrix[sim_matrix<affinity_threshold]=0\n\n\n if cluster_method == 'ward':\n # ## BEGIN WARD CLUSTERING CODE \n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n if roi_mask_nparray!='empty':\n #import pdb; pdb.set_trace()\n shape = roi_mask_nparray.shape\n connectivity = image.grid_to_graph(n_x=shape[0], n_y=shape[1],\n n_z=shape[2], mask=roi_mask_nparray)\n \n ward = FeatureAgglomeration(n_clusters=n_clusters, connectivity=connectivity,\n linkage='ward')\n ward.fit(sim_matrix)\n y_pred = ward.labels_.astype(np.int)\n else:\n print(\"Calculating Hierarchical Cross-clustering\")\n ward = FeatureAgglomeration(n_clusters=n_clusters, affinity='euclidean', linkage='ward') \n ward.fit(sim_matrix)\n y_pred = ward.labels_.astype(np.int)\n \n # # END WARD CLUSTERING CODE \n else:\n \n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n #cluster_method== 'spectral':\n #Spectral method\n spectral = cluster.SpectralClustering(n_clusters, eigen_solver='arpack', random_state = 5, affinity=\"precomputed\", assign_labels='discretize') \n spectral.fit(sim_matrix)\n y_pred = spectral.labels_.astype(np.int) \n\n# \n # BEGIN SPECTRAL CLUSTERING CODE \n \n # END SPECTRAL CLUSTERING CODE \n\n\n\n# sim_matrix[np.isnan((sim_matrix))]=0\n# sim_matrix[sim_matrix<0]=0\n# sim_matrix[sim_matrix>1]=1\n\n ## BEGIN WARD CLUSTERING CODE \n# print(\"Calculating Hierarchical Cross-clustering\")\n# ward = FeatureAgglomeration(n_clusters=n_clusters, affinity='euclidean', linkage='ward') \n# ward.fit(sim_matrix)\n# y_pred = ward.labels_.astype(np.int)\n# \n ## END WARD CLUSTERING CODE \n \n# # BEGIN SPECTRAL CLUSTERING CODE \n# spectral = cluster.SpectralClustering(n_clusters, eigen_solver='arpack', random_state = 5, affinity=\"precomputed\", assign_labels='discretize') \n# spectral.fit(sim_matrix)\n# y_pred = spectral.labels_.astype(np.int)\n# # END SPECTRAL CLUSTERING CODE \n \n return y_pred", "def test_create_cluster_resource_quota(self):\n pass", "def index(self):\n\n if self.cluster:\n self.cluster.index()\n else:\n super().index()", "def test_list_cluster_role(self):\n pass", "def testSharded(self):\n np.random.seed(0)\n 
num_classes = 5\n batch_size = 3\n\n for num_true in range(1, 5):\n labels = np.random.randint(\n low=0, high=num_classes, size=batch_size * num_true)\n (weights, biases, hidden_acts, sampled_vals, exp_logits,\n exp_labels) = self._GenerateTestData(\n num_classes=num_classes,\n dim=10,\n batch_size=batch_size,\n num_true=num_true,\n labels=labels,\n sampled=[1, 0, 2, 3],\n subtract_log_q=False)\n weight_shards, bias_shards = self._ShardTestEmbeddings(\n weights, biases, num_shards=3)\n logits_tensor, labels_tensor = _compute_sampled_logits(\n weights=[constant_op.constant(shard) for shard in weight_shards],\n biases=[constant_op.constant(shard) for shard in bias_shards],\n labels=constant_op.constant(\n labels, dtype=dtypes.int64, shape=(batch_size, num_true)),\n inputs=constant_op.constant(hidden_acts),\n num_sampled=4,\n num_classes=num_classes,\n num_true=num_true,\n sampled_values=sampled_vals,\n subtract_log_q=False,\n remove_accidental_hits=False,\n partition_strategy=\"div\",\n name=\"sampled_logits_sharded_num_true_%d\" % num_true)\n got_logits, got_labels = self.evaluate([logits_tensor, labels_tensor])\n self.assertAllClose(exp_logits, got_logits, self._eps)\n self.assertAllClose(exp_labels, got_labels, self._eps)", "def create_cluster(rs):\n\n rs.create_cluster(verbose=False)\n print('Creating cluster. Will check every 30 seconds for completed creation.')\n cluster_built = False\n while not cluster_built:\n print('Sleeping 30 seconds.')\n time.sleep(30)\n cluster_built = check_available(rs)", "def test_crud_cluster(self):\n # create the object\n response = self._create_cluster()\n self.assertEqual(response.status_code, status.HTTP_201_CREATED,\n response.content)\n\n # list the object\n cluster_id = self._list_cluster()\n # Assert that the originally created cluster id is the same as the one\n # returned by list\n self.assertEquals(response.data['id'], cluster_id)\n self.assertEquals(response.data['default_vm_type'], 'm5.24xlarge')\n self.assertEquals(response.data['default_zone']['name'], 'us-east-1b')\n\n # check details\n cluster_id = self._check_cluster_exists(cluster_id)\n\n # update cluster\n response = self._update_cluster(cluster_id)\n self.assertEquals(response['name'], 'new_name')\n\n # delete the object\n response = self._delete_cluster(cluster_id)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data)\n\n # check it no longer exists\n self._check_no_clusters_exist()", "def _cluster(self):\n # , distance_function=spearman_squared_distance, max_iter=1000, tol=0.0001):\n if self.cluster_method is None:\n clusters = KMedoids(\n self.k,\n self.batchsize,\n dist_func=self.distance_function,\n max_iter=self.max_iter,\n tol=self.tol,\n init_medoids=self.init_medoids,\n swap_medoids=self.swap_medoids,\n )\n clusters.fit(self.clustering_attributions, verbose=self.verbose)\n\n self.subpopulations = clusters.members\n self.subpopulation_sizes = GAM.get_subpopulation_sizes(clusters.members)\n self.explanations = self._get_explanations(clusters.centers)\n # Making explanations return numerical values instead of dask arrays\n if isinstance(self.explanations[0][0][1], da.Array):\n explanations = []\n for explanation in self.explanations:\n explanations.append([(x[0], x[1].compute()) for x in explanation])\n self.explanations = explanations\n else:\n self.cluster_method(self)", "def terminateCluster():\n try:\n # delete cluster\n redshift.delete_cluster(\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER,\n SkipFinalClusterSnapshot=True\n )\n\n # clear up role\n 
iam.detach_role_policy(\n RoleName=DWH_IAM_ROLE_NAME,\n PolicyArn=\"arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess\"\n )\n iam.delete_role(RoleName=DWH_IAM_ROLE_NAME)\n except Exception as e:\n print(e)", "def enableRemoteAccess():\n myClusterProps = redshift.describe_clusters(\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER\n )['Clusters'][0]\n\n # attempt to allow incoming TCP connections to the cluster\n print(\"Enabling incoming TCP connections...\")\n try:\n vpc = ec2.Vpc(id=myClusterProps[\"VpcId\"])\n for sg in vpc.security_groups.all():\n if sg.group_name == \"default\":\n defaultSg = sg\n break\n\n defaultSg.authorize_ingress(\n GroupName=defaultSg.group_name,\n CidrIp=CIDR_IP_ALLOWED,\n IpProtocol='TCP',\n FromPort=int(DWH_PORT),\n ToPort=int(DWH_PORT)\n )\n except Exception as e:\n print(e)\n return\n\n print(\"\\nCluster is ready for access!\")\n endpoint = myClusterProps[\"Endpoint\"][\"Address\"]\n print(f\"Endpoint: {endpoint}\")", "def modifyMastersWithMultipleInstances(self):\n # Nothing to do\n pass", "def create_redshift_cluster(redshift_client, role_arn):\n # Create the cluster if it doesn't exist.\n try:\n response = redshift_client.create_cluster(\n ClusterType=CLUSTER_TYPE,\n NodeType=NODE_TYPE,\n NumberOfNodes=NUM_NODES,\n DBName=DBNAME,\n ClusterIdentifier=IDENTIFIER,\n MasterUsername=USER,\n MasterUserPassword=PASSWORD,\n IamRoles=[role_arn]\n )\n except Exception as e:\n print(e)", "def test_get_hyperflex_cluster_list(self):\n pass", "def cluster_destroy(extra_args=None):\n cmd = [\"pcs\", \"cluster\", \"destroy\"]\n\n if isinstance(extra_args, (list, tuple)):\n cmd += extra_args\n\n log.debug(\"Running cluster destroy: %s\", cmd)\n\n return __salt__[\"cmd.run_all\"](cmd, output_loglevel=\"trace\", python_shell=False)", "def test_replace_cluster_policy(self):\n pass", "def test_eks_worker_node_managed_by_eks(self) -> None:\n response = self.ec2.describe_instances(Filters=[\n {\n 'Name': 'tag:Name',\n 'Values': ['eks-prod']\n }\n ])\n worker_instances = response.get('Reservations')[0].get('Instances')\n self.assertEqual(1, len(worker_instances))", "def launch_example_cluster_cmd(*args, **kwargs):\n return launch_example_cluster(*args, **kwargs)", "def test_delete_cluster_role(self):\n pass", "def cluster_nodes(self) -> ResponseT:\n return self.execute_command(\"CLUSTER NODES\")", "def main():\n parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('--config', required=True, help='Configuration file for run. Must be in shared_dir')\n parser.add_argument('-c', '--cluster_size', required=True, help='Number of workers desired in the cluster.')\n parser.add_argument('-s', '--sample_size', required=True, type=float, help='Size of the sample deisred in TB.')\n parser.add_argument('-t', '--instance_type', default='c3.8xlarge', help='e.g. 
m4.large or c3.8xlarge.')\n parser.add_argument('-n', '--cluster_name', required=True, help='Name of cluster.')\n parser.add_argument('--namespace', default='jtvivian', help='CGCloud NameSpace')\n parser.add_argument('--spot_price', default=0.60, help='Change spot price of instances')\n parser.add_argument('-b', '--bucket', default='tcga-data-cgl-recompute', help='Bucket where data is.')\n parser.add_argument('-d', '--shared_dir', required=True,\n help='Full path to directory with: pipeline script, launch script, config, and master key.')\n params = parser.parse_args()\n\n # Run sequence\n start = time.time()\n # Get number of samples from config\n with open(params.config, 'r') as f:\n num_samples = len(f.readlines())\n # Launch cluster and pipeline\n uuid = fix_launch(params)\n launch_cluster(params)\n ids = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')\n launch_pipeline(params)\n # Blocks until all workers are idle\n stop = time.time()\n # Collect metrics from cluster\n collect_metrics(ids, list_of_metrics, start, stop, uuid=uuid)\n # Apply \"Insta-kill\" alarm to every worker\n map(apply_alarm_to_instance, ids)\n # Kill leader\n logging.info('Killing Leader')\n leader_id = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-leader')[0]\n apply_alarm_to_instance(leader_id, threshold=5)\n # Generate Run Report\n avail_zone = get_avail_zone(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')[0]\n total_cost, avg_hourly_cost = calculate_cost(params.instance_type, ids[0], avail_zone)\n # Report values\n output = ['UUID: {}'.format(uuid),\n 'Number of Samples: {}'.format(num_samples),\n 'Number of Nodes: {}'.format(params.cluster_size),\n 'Cluster Name: {}'.format(params.cluster_name),\n 'Source Bucket: {}'.format(params.bucket),\n 'Average Hourly Cost: ${}'.format(avg_hourly_cost),\n 'Cost per Instance: ${}'.format(total_cost),\n 'Availability Zone: {}'.format(avail_zone),\n 'Start Time: {}'.format(datetime.isoformat(datetime.utcfromtimestamp(start))),\n 'Stop Time: {}'.format(datetime.isoformat(datetime.utcfromtimestamp(stop))),\n 'Total Cost of Cluster: ${}'.format(float(total_cost) * int(params.cluster_size)),\n 'Cost Per Sample: ${}'.format((float(total_cost) * int(params.cluster_size) / int(num_samples)))]\n with open(os.path.join(str(uuid) + '_{}'.format(str(datetime.utcnow()).split()[0]), 'run_report.txt'), 'w') as f:\n f.write('\\n'.join(output))\n # You're done!\n logging.info('\\n\\nScaling Test Complete.')", "def __kmeans__(cls, cluster_size, pca_reduced, names: list):\n import warnings\n warnings.filterwarnings(\"ignore\")\n clusterer = KMeans(n_clusters=cluster_size, random_state=10)\n cluster_labels = clusterer.fit_predict(pca_reduced)\n result = list()\n result.append(ClusterProcessor.davies_bouldin(cluster_labels, pca_reduced, cluster_size, names))\n result.append(ClusterProcessor.variance_ratio_criterion(cluster_labels, pca_reduced, cluster_size, names))\n result.append(ClusterProcessor.silhouette_coefficient(cluster_labels, pca_reduced, cluster_size, names))\n return result", "def test_list_cluster_policy(self):\n pass", "def show_cluster(self):\n if self.controller.cluster:\n self.print_object(\n 'cluster', ('id', 'name', 'status'), self.controller.cluster\n )\n else:\n print(\"There is no cluster.\")", "def clusters(self):\n raise NotImplementedError", "def create_instance(ami, sg_name):\n instance = None\n ec2 = 
boto3.resource('ec2',region_name=\"us-east-1\")\n # TODO: Create an EC2 instance\n # Wait for the instance to enter the running state\n # Reload the instance attributes\n\n try:\n instance = ec2.create_instances(\n ImageId=ami,\n InstanceType=INSTANCE_TYPE,\n KeyName=KEY_NAME,\n MaxCount=1,\n MinCount=1,\n SecurityGroupIds=[\n sg_name,\n ],\n TagSpecifications=[{\n 'ResourceType': 'instance',\n 'Tags': TAGS\n }, {\n 'ResourceType': 'volume',\n 'Tags': TAGS\n }]\n )[0]\n instance.wait_until_running()\n instance.reload()\n print(instance.state)\n except ClientError as e:\n print(e)\n\n return instance", "def test_list_cluster_resource_quota(self):\n pass", "def clustering(cluster_list):\n while len(cluster_list) > 1:\n x = 0\n y = 0\n distance_min = 10\n\n for i in range(0,len(cluster_list)):\n\n for j in range(0,len(cluster_list)):\n\n if i != j:\n distance = cluster_list[i].linkage(cluster_list[j])\n if distance < distance_min:\n x = i\n y = j\n distance_min = distance\n \n \n clusX = cluster_list[x]\n clusY = cluster_list[y]\n cluster_list.pop(cluster_list.index(clusX))\n cluster_list.pop(cluster_list.index(clusY))\n\n cluster_list.append(Cluster(clusX,clusY))\n return cluster_list[0]", "def update_existed_cluster(existed_cluster,sema_cluster,qid,returned_result):\n if qid not in existed_cluster:\n existed_cluster[qid] = set()\n\n for tid in returned_result:\n cluster_id = sema_cluster.get_cluster_id(qid,tid)\n if cluster_id is not None:\n if cluster_id not in existed_cluster[qid]:\n existed_cluster[qid].add(cluster_id)", "def deregister_ecs_cluster(EcsClusterArn=None):\n pass", "def run_split_cluster(cluster_df, keep_df=None, remove_df=None):\n if keep_df is None:\n keep_df = pd.DataFrame(columns=cluster_df.columns)\n if remove_df is None:\n remove_df = pd.DataFrame(columns=cluster_df.columns)\n for label, group_df in cluster_df.groupby([\"label\"]):\n # only keep the maximum cluster\n center_count = {center: len(group_df[group_df.center == center]) for center in group_df.center.unique()}\n max_center = sorted(center_count.items(), key=lambda i: i[1], reverse=True)[0][0]\n keep_df = keep_df.append(group_df[group_df.center == max_center], ignore_index=True)\n remove_df = remove_df.append(group_df[group_df.center != max_center], ignore_index=True)\n return keep_df, remove_df", "def test_patch_cluster_resource_quota(self):\n pass", "def cluster_spec(num_workers, num_ps):\n cluster = {}\n port = 12222\n\n all_ps = []\n host = '127.0.0.1'\n for _ in range(num_ps):\n all_ps.append(\"{}:{}\".format(host, port))\n port += 1\n cluster['ps'] = all_ps\n\n all_workers = []\n for _ in range(num_workers):\n all_workers.append(\"{}:{}\".format(host, port))\n port += 1\n cluster['worker'] = all_workers\n return cluster", "def test_read_cluster_resource_quota(self):\n pass", "def test_delete_cluster_resource_quota(self):\n pass", "def test_replace_cluster_resource_quota_status(self):\n pass", "def cluster_myid(self, target_node: \"TargetNodesT\") -> ResponseT:\n return self.execute_command(\"CLUSTER MYID\", target_nodes=target_node)", "def test_patch_cluster_policy(self):\n pass", "def cluster_amazon_video_game_again() -> Dict:\n return dict(model=None, score=None, clusters=None)", "def cluster(self):\n center_index = np.random.choice(range(100), self.K, replace=False)\n self.centers = np.array([self.X[i] for i in center_index])\n self.cluster_sizes = np.zeros(self.K)\n member_of = np.zeros(100, dtype=int)\n min_dist = np.array([distance.euclidean(self.centers[0], point) for point in self.X])\n 
self.cluster_sizes[0] = 100\n flag = True\n while flag:\n flag = False\n for i, point in enumerate(self.X):\n for j, center in enumerate(self.centers):\n if member_of[i] != j:\n dist = distance.euclidean(point, center)\n if dist < min_dist[i]:\n flag = True\n current = member_of[i]\n self.cluster_sizes[current] -= 1\n self.cluster_sizes[j] += 1\n member_of[i] = j\n min_dist[i] = dist\n if np.count_nonzero(self.cluster_sizes) != self.K:\n return self.cluster()\n self.centers = np.zeros((self.K, 2), dtype='d')\n for i, point in enumerate(self.X):\n center = member_of[i]\n self.centers[center] += point\n for i, center in enumerate(self.centers):\n center /= self.cluster_sizes[i]", "def test_create_cluster_policy(self):\n pass", "def cluster_hdbscan(\n clusterable_embedding, min_cluster_size, viz_embedding_list\n):\n print(f\"min_cluster size: {min_cluster_size}\")\n clusterer = hdbscan.HDBSCAN(\n min_cluster_size=min_cluster_size, prediction_data=True\n ).fit(clusterable_embedding)\n labels = hdbscan.HDBSCAN(min_cluster_size=min_cluster_size,).fit_predict(\n clusterable_embedding\n )\n print(f\"found {len(np.unique(labels))} clusters\")\n clustered = labels >= 0\n print(f\"fraction clustered: {np.sum(clustered)/labels.shape[0]}\")\n for embedding in viz_embedding_list:\n plt.scatter(\n embedding[~clustered][:, 0],\n embedding[~clustered][:, 1],\n c=(0.5, 0.5, 0.5),\n s=10,\n alpha=0.5,\n )\n plt.scatter(\n embedding[clustered][:, 0],\n embedding[clustered][:, 1],\n c=labels[clustered],\n s=10,\n cmap=\"Spectral\",\n )\n plt.legend(labels)\n plt.show()\n\n return labels, clusterer", "def test_read_cluster_policy(self):\n pass", "def run(ceph_cluster, **kw):\n log.info(run.__doc__)\n config = kw[\"config\"]\n cephadm = CephAdmin(cluster=ceph_cluster, **config)\n rados_object = RadosOrchestrator(node=cephadm)\n mon_obj = MonConfigMethods(rados_obj=rados_object)\n ceph_nodes = kw.get(\"ceph_nodes\")\n osd_list = []\n total_osd_app_mem = {}\n\n for node in ceph_nodes:\n if node.role == \"osd\":\n node_osds = rados_object.collect_osd_daemon_ids(node)\n osd_list = osd_list + node_osds\n\n target_configs = config[\"cache_trim_max_skip_pinned\"][\"configurations\"]\n max_skip_pinned_value = int(\n mon_obj.get_config(section=\"osd\", param=\"bluestore_cache_trim_max_skip_pinned\")\n )\n\n # Check the default value of the bluestore_cache_trim_max_skip_pinned value\n if max_skip_pinned_value != 1000:\n log.error(\n \"The default value of bluestore_cache_trim_max_skip_pinned not equal to 1000\"\n )\n raise Exception(\n \"The default value of bluestore_cache_trim_max_skip_pinned is not 1000\"\n )\n\n # Creating pools and starting the test\n for entry in target_configs.values():\n log.debug(\n f\"Creating {entry['pool_type']} pool on the cluster with name {entry['pool_name']}\"\n )\n if entry.get(\"pool_type\", \"replicated\") == \"erasure\":\n method_should_succeed(\n rados_object.create_erasure_pool, name=entry[\"pool_name\"], **entry\n )\n else:\n method_should_succeed(\n rados_object.create_pool,\n **entry,\n )\n\n if not rados_object.bench_write(**entry):\n log.error(\"Failed to write objects into the EC Pool\")\n return 1\n rados_object.bench_read(**entry)\n log.info(\"Finished writing data into the pool\")\n\n # performing scrub and deep-scrub\n rados_object.run_scrub()\n rados_object.run_deep_scrub()\n time.sleep(10)\n\n rados_object.change_heap_profiler_state(osd_list, \"start\")\n # Executing tests for 45 minutes\n time_execution = datetime.datetime.now() + datetime.timedelta(minutes=45)\n while 
datetime.datetime.now() < time_execution:\n # Get all OSDs heap dump\n heap_dump = rados_object.get_heap_dump(osd_list)\n # get the osd application used memory\n osd_app_mem = get_bytes_used_by_app(heap_dump)\n total_osd_app_mem = mergeDictionary(total_osd_app_mem, osd_app_mem)\n # wait for 10 seconds and collecting the memory\n time.sleep(10)\n for osd_id, mem_list in total_osd_app_mem.items():\n mem_growth = is_what_percent_mem(mem_list)\n if mem_growth > 80:\n log.error(\n f\"The osd.{osd_id} consuming more memory with the relative memory growth {mem_growth}\"\n )\n raise Exception(\"No warning generated by PG Autoscaler\")\n log.info(f\"The relative memory growth for the osd.{osd_id} is {mem_growth} \")\n\n rados_object.change_heap_profiler_state(osd_list, \"stop\")\n\n # check fo the crashes in the cluster\n crash_list = rados_object.do_crash_ls()\n if not crash_list:\n return 0\n else:\n return 1", "def createInstance(ec2,ami,nb_nodes,placement,instance_type,key,sg,user_data=None):\n\n reservation = ec2.run_instances(ami,min_count=nb_nodes,max_count=nb_nodes,placement = placement,key_name=key,security_groups=[sg],instance_type=instance_type,user_data=user_data)\n instance = reservation.instances[0]\n return instance", "def show_cluster_status(self, *args, **kwargs):\r\n return execute(self._show_cluster_status, *args, **kwargs)", "def cluster_amazon_video_game() -> Dict:\n return dict(model=None, score=None, clusters=None)", "def clustering(dataset, logger):\n all_instances = dataset\n meta_dataset = collections.defaultdict(list)\n for instance in all_instances:\n meta_dataset[instance['label']].append(instance['coordinate'])\n\n tasklist = map(\n lambda item, meta_dataset=meta_dataset, logger=logger: (\n item[0],\n clustering_by_label,\n (item[1], item[0], meta_dataset, logger)), meta_dataset.items())\n\n # pool = multiprocessing.pool.Pool(PROCESS_COUNT)\n # clusters = dict(pool.map(map_generate_tuple, tasklist))\n clusters = dict(map(map_generate_tuple, tasklist))\n # pool.close()\n # pool.join()\n\n return clusters", "def cluster(self):\n return self._cluster", "def cluster(self):\n return self._cluster", "def find_clusters():\n clusters = ecs_client.list_clusters()['clusterArns']\n logging.debug(\"\")\n logging.debug(\"************************************************************\")\n logging.debug(\"Retrieved %i clusters\" % (len(clusters)))\n for cluster in clusters:\n ratio = SequenceMatcher(\n lambda item:\n item == \" \",\n \"arn:aws:ecs:us-east-1*cluster/default\",\n cluster\n ).ratio()\n if ratio < 0.82:\n cluster_short = cluster.split(\"/\")[1]\n if args.cluster and cluster_short != args.cluster:\n continue\n ecs_data[cluster_short] = {}\n logging.debug(\"Cluster: %s\" % (cluster))\n instance_arns = ecs_client.list_container_instances(\n cluster=cluster\n )['containerInstanceArns']\n instances = ecs_client.describe_container_instances(\n cluster=cluster,\n containerInstances=instance_arns\n )['containerInstances']\n logging.debug(\"Retrieved %i cluster instances\" % (len(instances)))\n for instance in instances:\n ecs_data[cluster_short][instance['ec2InstanceId']] = {\n 'instance_id': instance['ec2InstanceId'],\n 'cluster': cluster_short,\n 'containers': []\n }\n logging.debug(\"\\tLooking for tasks in (%s): %s %s\" % (instance_data[instance['ec2InstanceId']]['name'], instance_data[instance['ec2InstanceId']]['id'], instance['containerInstanceArn']))\n tasks = ecs_client.list_tasks(\n cluster=cluster,\n containerInstance=instance['containerInstanceArn'],\n )['taskArns']\n 
logging.debug(\"Retrieved %i cluster tasks\" % (len(tasks)))\n for task in tasks:\n containers = ecs_client.describe_tasks(\n cluster=cluster,\n tasks=[task]\n )['tasks']\n for container in containers:\n if args.action != \"list\":\n if container['taskDefinitionArn'].split(\"/\")[1].split(\":\")[0] == args.task:\n if args.action == \"ssh\":\n if args.random:\n hosts.append(instance['ec2InstanceId'])\n else:\n logging.debug(\"sshing to %s\" % (instance['ec2InstanceId']))\n print('*** Initiating Host Interactive Session\\n')\n interactive().connect(instance_data[instance['ec2InstanceId']]['private_ip'],'')\n sys.exit(0)\n if args.action == \"enter\":\n if args.random:\n logging.debug(\"Recording host %s for random selection\" % (instance['ec2InstanceId']))\n hosts.append(instance['ec2InstanceId'])\n else:\n logging.debug(\"connect to %s -> %s\" % (instance['ec2InstanceId'],container['taskDefinitionArn'].split(\"/\")[1].split(\":\")[0]))\n print '*** Initiating Container Interactive Session\\n'\n interactive().docker_enter(args.user, instance_data[instance['ec2InstanceId']]['private_ip'],args.task)\n sys.exit(0)\n if args.action == \"list\":\n logging.debug(\"%s matched arg(%s): %s\" % (container['taskDefinitionArn'].split(\"/\")[1].split(\":\")[0], args.action, instance['ec2InstanceId']))\n ecs_data[cluster_short][instance['ec2InstanceId']]['containers'].append(container['taskDefinitionArn'].split(\"/\")[1].split(\":\")[0])\n # logging.info(\"%s:%s\" % (container['taskDefinitionArn'].split(\"/\")[1].split(\":\")[0], args.task))\n return True", "def is_distributed(args: dict) -> bool:\n\n return args.local_rank != -1", "def count_all_cluster_instances(cluster_name, predictive=False, exclude_node_label_keys=app_config[\"EXCLUDE_NODE_LABEL_KEYS\"]):\n\n # Get the K8s nodes on the cluster, while excluding nodes with certain label keys\n k8s_nodes = get_k8s_nodes(exclude_node_label_keys)\n\n count = 0\n asgs = get_all_asgs(cluster_name)\n for asg in asgs:\n instances = asg['Instances']\n if predictive:\n count += asg['DesiredCapacity']\n else:\n # Use the get_node_by_instance_id() function as it only returns the node if it is not excluded by K8s labels\n for instance in instances:\n instance_id = instance['InstanceId']\n try:\n get_node_by_instance_id(k8s_nodes, instance_id)\n count += 1\n except Exception:\n logger.info(\"Skipping instance {}\".format(instance_id))\n logger.info(\"{} asg instance count in cluster is: {}. 
K8s node count should match this number\".format(\"*** Predicted\" if predictive else \"Current\", count))\n return count", "def is_distributed() -> NotImplementedError:\n raise NotImplementedError()", "def show_cluster(name: str) -> Cluster:\n environment = EnvironmentProvider().environment\n return environment.clusters[name]", "def _GetShardedBatch() -> tf.types.experimental.distributed.PerReplica:\n per_host_batches: List[py_utils.NestedMap] = []\n # Note: `available_devices` omits the executor host; just those with TPUs.\n for host_device in py_utils.Flatten(\n cluster_factory.Current().available_devices.tolist()\n ):\n with tf.device(host_device):\n batch = self.task.input.GetPreprocessedInputBatch()\n\n # Remove bucket_keys; this relates to GenericInput pipelines.\n batch = batch.FilterKeyVal(lambda k, _: not k.endswith('bucket_keys'))\n\n # Process embedding ID features according to their specified types.\n batch = batch.TransformWithKey(\n tpu_embedding_layers_v2.TPU_EMBEDDING_MANAGER.ProcessInputFeature\n )\n\n per_host_batches.extend(Split(batch, replicas_per_host))\n\n return strategy.experimental_distribute_values_from_function(\n lambda ctx: per_host_batches[ctx.replica_id_in_sync_group]\n )", "def _Delete(self):\n cmd = self.cmd_prefix + [\n 'redshift', 'delete-cluster-subnet-group',\n '--cluster-subnet-group-name', self.name\n ]\n vm_util.IssueCommand(cmd, raise_on_failure=False)", "def run_instance():\n ami_id = \"ami-04876f29fd3a5e8ba\" # AMI Id\n instance_type = \"t2.micro\" # Instance Type\n tag_specs = [\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': 'Name',\n 'Value': 'BoB10@ProductDev.sdk.8084'\n }\n ]\n }\n ]\n\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Run an instance\n\n instances = ec2_resource.create_instances(ImageId=ami_id, InstanceType=instance_type,\n MaxCount=1, MinCount=1, KeyName='BoB10@ProductDev.8084',\n TagSpecifications=tag_specs, \n SecurityGroupIds=['sg-06b757b4bb272d98f'],\n UserData=assemble_userdata().as_string())\n Instance_id = instances[0].id\n print('\\nInstance Id: ' + Instance_id)\n print('Image Id: ' + instances[0].image_id)\n print('Instance Type: ' + instances[0].instance_type)\n print('State: ' + instances[0].state['Name'])\n return Instance_id", "def instance_action():\n data = check_args(\n ('cloudProvider', 'apiKey', 'secretKey', 'instanceAction',\n 'instanceId')\n )\n job = jobs.instance_action.apply_async(args=(data,))\n return make_response(job_id=job.id)", "def cluster_enabled(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"cluster_enabled\")", "def test_update_hyperflex_cluster(self):\n pass", "def cluster_start(r):\n cluster_id = request_get(r, \"cluster_id\")\n if not cluster_id:\n logger.warning(\"No cluster_id is given\")\n return make_fail_response(\"No cluster_id is given\")\n if cluster_handler.start(cluster_id):\n return jsonify(response_ok), CODE_OK\n\n return make_fail_response(\"cluster start failed\")", "def configure_cluster(ctx, zone, db_instance_name):\n ctx.run(init_pg_servers_play_run(zone, db_instance_name), pty=True, echo=True)", "def find_cluster(self, id):\n raise NotImplementedError", "def simulated_cluster(n_stars=CLUSTER_DEFAULTS['stars'],\n dimensions=CLUSTER_DEFAULTS['dimensions']):\n\n nx, ny = dimensions\n\n # Create empty image\n image = np.zeros((ny, nx))\n\n # Generate random positions\n r = np.random.random(n_stars) * nx\n theta = np.random.uniform(0., 2. 
* np.pi, n_stars)\n\n # Generate random fluxes\n fluxes = np.random.random(n_stars) ** 2\n\n # Compute position\n x = nx / 2 + r * np.cos(theta)\n y = ny / 2 + r * np.sin(theta)\n\n # Add stars to image\n # ==> First for loop and if statement <==\n for idx in range(n_stars):\n if x[idx] >= 0 and x[idx] < nx and y[idx] >= 0 and y[idx] < ny:\n image[y[idx], x[idx]] += fluxes[idx]\n\n # Convolve with a gaussian\n image = gaussian_filter(image, sigma=1)\n\n # Add noise\n image += np.random.normal(1., 0.001, image.shape)\n\n return image", "def quick_instance(self, name, image, instance_type, env_tag='dev', zone_tag='starwatts', os_tag='debian', sg_id=None,\n private=True, extra_sg_ids=None, extra_tags=None, terminate_on_shutdown=False,\n debug=False):\n # Debug setting\n if debug:\n logging.basicConfig(level=logging.DEBUG)\n\n # Preliminary tests\n try:\n ami = self.get_image(image_id=image)\n except EC2ResponseError:\n logging.error(\"The image {} could not be found. Aborting.\".format(image))\n return\n print(\"Using AMI {} : {}\".format(image, ami.name))\n if len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'running'})) > 0 or \\\n len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'stopped'})) > 0:\n logging.error(\"An instance with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No instance has the same 'name' tag.\")\n if self.keypair_exists(name):\n logging.error(\"A keypair with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No keypair was found with the same name.\")\n if sg_id is None:\n if self.security_group_exists(name=name):\n logging.error(\"A security group with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No security group was found with the same name.\")\n\n # Tags generation\n logging.debug(\"Generating tags to apply.\")\n tags = dict(name=name, os=os_tag, env=env_tag, zone=zone_tag, privacy='true' if private else 'false')\n if extra_tags is not None:\n tags.update(extra_tags)\n print(\"Tags : {}\".format(tags))\n\n # Fetching needed security groups (bastion and zabbix)\n standard_sg = self.get_all_security_groups(groupnames=['standard'])\n if len(standard_sg) != 1:\n logging.error(\"Multiple or no security group was found for the 'bastion' search. 
Aborting.\")\n return\n standard_sg = standard_sg[0]\n logging.debug(\"The following security group was found for 'standard : {} {}\".format(standard_sg.id,\n standard_sg.description))\n\n # Security group creation\n if sg_id is None:\n sg = self.create_security_group(name, \"SG applied to {} VM\".format(name))\n sg_id = sg.id\n\n sg_ids = [sg_id, standard_sg.id, ]\n # Using the extra security groups if any\n if extra_sg_ids is not None:\n logging.debug(\"Extra security groups to add : {}\".format(extra_sg_ids))\n sg_ids.extend(extra_sg_ids)\n logging.debug(\"Security Groups : {}\".format(sg_ids))\n\n user_data = \"-----BEGIN OUTSCALE SECTION-----\\nprivate_only=true\\n-----END OUTSCALE SECTION-----\" if private else \"\"\n logging.debug(\"Creating keypair.\")\n kp = self.create_key_pair(key_name=name)\n fp = os.path.join(os.path.expanduser('~/.ssh'), '%s.pem' % kp.name)\n with open(fp, 'wb') as fd:\n fd.write(bytes(kp.material, \"UTF-8\"))\n logging.debug(\"Keypair written to ~/.ssh/{}.pem\".format(name))\n\n resa = self.run_instances(image_id=image, key_name=name, security_groups=sg_ids, instance_type=instance_type,\n user_data=user_data,\n instance_initiated_shutdown_behavior='terminate' if terminate_on_shutdown else 'stop')\n inst = resa.instances[0]\n logging.debug(\"Adding tags to the newly created machine.\")\n inst.add_tags(tags)\n return inst", "def delete_cluster(self):\n cf_namespace_id = self.create_or_fetch_namespace()\n self.delete_action(cf_namespace_id)\n self.create_action(cf_namespace_id)\n self.invoke_action(cf_namespace_id)" ]
[ "0.62720895", "0.5639447", "0.55054975", "0.547503", "0.54189545", "0.540334", "0.5387055", "0.53727955", "0.53659165", "0.5347129", "0.5334057", "0.5287203", "0.52828735", "0.5273932", "0.52656776", "0.5226672", "0.5215221", "0.5213449", "0.52105623", "0.51949185", "0.5172198", "0.5170626", "0.5165022", "0.5155144", "0.514895", "0.51457226", "0.51440895", "0.5141128", "0.5117461", "0.5111542", "0.511084", "0.510794", "0.5101917", "0.50927764", "0.5092245", "0.5087606", "0.5076839", "0.5068755", "0.5065344", "0.5064989", "0.5061867", "0.5030651", "0.5028261", "0.50080854", "0.5001391", "0.49902225", "0.4987471", "0.49871898", "0.4979407", "0.49656528", "0.49558163", "0.49534374", "0.4938985", "0.49366942", "0.4929509", "0.4921237", "0.4918659", "0.49133134", "0.4896117", "0.48879278", "0.48871502", "0.4885366", "0.4880396", "0.48793003", "0.48784766", "0.48720318", "0.487081", "0.48666024", "0.48580056", "0.48561385", "0.48534465", "0.48477152", "0.48451665", "0.48424125", "0.48335359", "0.48299208", "0.48214602", "0.48206672", "0.4820603", "0.4820249", "0.48192036", "0.481889", "0.48059365", "0.48059365", "0.48017284", "0.48005193", "0.47961858", "0.47942272", "0.47918174", "0.47915056", "0.47908682", "0.47843394", "0.47822562", "0.47806665", "0.47763386", "0.4769744", "0.4761611", "0.47604394", "0.4754585", "0.47512943", "0.4748613" ]
0.0
-1
This operation is applicable only to general-purpose local-disk and dedicated local-disk instances. You can call this operation up to 30 times per minute. To call this operation at a higher frequency, use a Logstore. For more information, see [Manage a Logstore](~~48990~~).
def describe_slow_log_records_with_options( self, request: dds_20151201_models.DescribeSlowLogRecordsRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.DescribeSlowLogRecordsResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.dbname): query['DBName'] = request.dbname if not UtilClient.is_unset(request.end_time): query['EndTime'] = request.end_time if not UtilClient.is_unset(request.node_id): query['NodeId'] = request.node_id if not UtilClient.is_unset(request.order_type): query['OrderType'] = request.order_type if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.page_number): query['PageNumber'] = request.page_number if not UtilClient.is_unset(request.page_size): query['PageSize'] = request.page_size if not UtilClient.is_unset(request.resource_group_id): query['ResourceGroupId'] = request.resource_group_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token if not UtilClient.is_unset(request.start_time): query['StartTime'] = request.start_time req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='DescribeSlowLogRecords', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.DescribeSlowLogRecordsResponse(), self.call_api(params, req, runtime) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def monitorStore():\n # commented to use psutil system info system_info = systeminfo.get_all_info()\n\n system_info = nodeinfo.node_all()\n system_info ['monitored_timestamp'] = config.get_current_system_timestamp()\n\n # Attach sliver info to system info\n system_info.update(sliverinfo.collectAllDataAPI())\n\n s = shelve.open('log_shelf.db', writeback = True)\n\n while(1):\n try:\n try:\n if s.has_key('current_seq_number'):\n #Update current sequence number\n s['current_seq_number']+= 1\n current_seq = s['current_seq_number']\n else:\n current_seq = 1\n s['current_seq_number']= current_seq\n\n print(\"writing to file: \" + str(current_seq))\n\n # print(\"writing to file\" + str(system_info))\n s[str(current_seq)]= system_info\n\n\n finally:\n s.close()\n break\n\n except OSError:\n # In some research devices, the underlying dbm has a bug which needs to be handled explicitly\n print(\"Exception caught while handling shelve file!! OS Error: file not found. Trying again in 1 second\")\n time.sleep(1)\n continue\n\n delete_seen_entries()", "def log(msg):\n\n print('datastore: %s' % msg)", "def disable_disk_logging():\r\n app.set_option(_DISK_LOG_LEVEL_OPTION, LogOptions._LOG_LEVEL_NONE_KEY, force=True)", "def store_stream(data: DataStream):\n if data:\n try:\n c1 = datetime.now()\n CC.save_datastream(data,\"datastream\")\n e1 = datetime.now()\n CC.save_datastream_to_influxdb(data)\n i1 = datetime.now()\n print(\"Cassandra Time: \", e1-c1, \" Influx Time: \",i1-e1, \" Batch size: \",len(data.data))\n except:\n cc_log()", "def test_Drive_log_count(opencl_env: cldrive_env.OpenCLEnvironment):\n logs = opencl_kernel_driver.Drive(\n \"\"\"\nkernel void A(global int* a, global int* b) {\n a[get_global_id(0)] += b[get_global_id(0)];\n}\n\"\"\",\n 16,\n 16,\n opencl_env,\n 5,\n )\n assert len(logs) == 5", "def disk():\n run(env.disk_usage_command % env)", "def record_used(kind, hash):\n if os.path.exists(LOG_FILEPATH):\n log = open(os.path.join(ROOT, 'used'), 'a')\n else:\n log = open(os.path.join(ROOT, 'used'), 'w+')\n\n log.writelines([\"%s...%s\\n\" % (kind, hash)])", "def syslog_local_file(handle, admin_state, name, severity=\"emergencies\",\n size=\"40000\"):\n\n from ucsmsdk.mometa.comm.CommSyslogFile import CommSyslogFile\n\n mo = CommSyslogFile(parent_mo_or_dn=\"sys/svc-ext/syslog\", size=size,\n admin_state=admin_state,\n name=name,\n severity=severity)\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n return mo", "def check_oplog(con, warning, critical, perf_data):\n warning = warning or 24 \n critical = critical or 4\n try:\n db = con.local\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) \n if (db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) != None) :\n oplog = \"oplog.rs\";\n else :\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"})\n if (db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"}) != None) :\n oplog = \"oplog.$main\";\n else :\n message = \"neither master/slave nor replica set replication detected\";\n return check_levels(None,warning,critical,message)\n\n try:\n set_read_preference(con.admin)\n data=con.local.command(pymongo.son_manipulator.SON([('collstats',oplog)]))\n except:\n data = con.admin.command(son.SON([('collstats',oplog)]))\n\n ol_size=data['size']\n ol_storage_size=data['storageSize']\n ol_used_storage=int(float(ol_size)/ol_storage_size*100+1)\n ol=con.local[oplog]\n firstc = ol.find().sort(\"$natural\",pymongo.ASCENDING).limit(1)[0]['ts']\n lastc = 
ol.find().sort(\"$natural\",pymongo.DESCENDING).limit(1)[0]['ts']\n time_in_oplog= (lastc.as_datetime()-firstc.as_datetime())\n message=\"Oplog saves \"+ str(time_in_oplog) + \" %d%% used\" %ol_used_storage \n try: #work starting from python2.7\n hours_in_oplog= time_in_oplog.total_seconds()/60/60\n except:\n hours_in_oplog= float(time_in_oplog.seconds + time_in_oplog.days * 24 * 3600)/60/60\n approx_level=hours_in_oplog*100/ol_used_storage\n message+=performance_data(perf_data,[(\"%.2f\" % hours_in_oplog,'oplog_time',warning,critical),(\"%.2f \" % approx_level, 'oplog_time_100_percent_used')])\n return check_levels(-approx_level,-warning,-critical,message)\n\n except Exception, e:\n return exit_with_general_critical(e)", "def disk_log_level():\r\n if LogOptions._DISK_LOG_LEVEL is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_LEVEL", "def get_full_log(self):\n return self._get_log('full')", "def _record_storage_event(metric, value=0):\n command_name = properties.VALUES.metrics.command_name.Get()\n metrics.CustomKeyValue(command_name, 'Storage-' + metric, value)", "def get_logdb_quota():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<show><system><logdb-quota></logdb-quota></system></show>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def test_memory_pressure(self):\n self.execute_query(self.query)\n # This triggers a full GC as of openjdk 1.8.\n call([\"jmap\", \"-histo:live\", str(self.cluster.catalogd.get_pid())])\n # Sleep for logbufsecs=5 seconds to wait for the log to be flushed. Wait 5 more\n # seconds to reduce flakiness.\n time.sleep(10)\n assert self.metadata_cache_string not in self._get_catalog_object()", "def getLogs():", "def getLogs():", "def InsertLog():", "def GetLogs(self):\n raise NotImplementedError()", "def logmem(cls, tag):\n\n cls.logger.info(\"----%s----\", str(tag))\n mem = psutil.virtual_memory()\n cls.logger.info(\"total:%s M\", mem.total / 1024 / 1024)\n cls.logger.info(\"available:%s M\", mem.available / 1024 / 1024)\n cls.logger.info(\"used:%s M\", mem.used / 1024 / 1024)\n cls.logger.info(\"free:%s M\", mem.free / 1024 / 1024)\n cls.logger.info(\"active:%s M\", mem.active / 1024 / 1024)\n cls.logger.info(\"inactive:%s M\", mem.inactive / 1024 / 1024)\n cls.logger.info(\"buffers:%s M\", mem.buffers / 1024 / 1024)\n cls.logger.info(\"cached:%s M\", mem.cached / 1024 / 1024)\n cls.logger.info(\"shared:%s M\", mem.shared / 1024 / 1024)", "def upload_crashes(self, name, directory):\n logging.info('Not uploading crashes because no Filestore.')", "def logs(lines=50):\n run(f'journalctl --lines {lines} --unit addok --follow')", "def log(self, message):\n try:\n stat = os.stat(self.logpath)\n if stat.st_size >= 1000000:\n os.rename(self.logpath, self.logpath + '.1')\n except:\n pass\n logfile = open(self.logpath, 'a+')\n logfile.write(message + \"\\n\")\n logfile.close()", "def logs(name, namespace, region, start_time, end_time, offset, failed, tail):\n\n start, end = _align_time(start_time, end_time, offset, tail)\n client = ScfLogClient(name, namespace, region, failed)\n client.fetch_log(start, end, tail)", "def objectLogger(params):\n mode = params['mode']\n maxlen = params['maxlen']\n file_format = params['format']\n home_dir = params['storage_dir']\n server = Listener((params['ip'], int(params['port'])))\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n while True:\n conn = server.accept()\n while True:\n try:\n data 
= conn.recv()\n except EOFError:\n break\n if data:\n storage.append(data)\n else:\n storage.sync(id_generator())\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n conn.close()", "def _load_disk(self):", "def _load_disk(self):", "def redire_spark_logs(bigdl_type=\"float\", log_path=os.getcwd() + \"/bigdl.log\"):\n callBigDlFunc(bigdl_type, \"redirectSparkLogs\", log_path)", "def get_device_log(self, client):\r\n file_path = client.getDeviceLog()\r\n logging.info(str(time.asctime(time.localtime())) + \" Device Logs collected !! \" + file_path)\r\n #self.logger.info(\"Device logs collected at %s\" % file_path)\r\n return file_path", "def logs(self, container: Container) -> str:", "def log_store(self) -> str:\n return pulumi.get(self, \"log_store\")", "def on_sync(self):\r\n self.log()", "def collect_storage_metrics(sys):\n try:\n session = get_session()\n client = InfluxDBClient(host=INFLUXDB_HOSTNAME, port=INFLUXDB_PORT, database=INFLUXDB_DATABASE)\n\n sys_id = sys[\"id\"]\n sys_name = get_system_name(sys)\n\n json_body = list()\n\n # Get Drive statistics\n drive_stats_list = session.get((\"{}/{}/analysed-drive-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n drive_locations = get_drive_location(sys_id, session)\n if CMD.showDriveNames:\n for stats in drive_stats_list:\n location_send = drive_locations.get(stats[\"diskId\"])\n LOG.info((\"Tray{:02.0f}, Slot{:03.0f}\").format(location_send[0], location_send[1]))\n # Add Drive statistics to json body\n for stats in drive_stats_list:\n disk_location_info = drive_locations.get(stats[\"diskId\"])\n disk_item = dict(\n measurement = \"disks\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n sys_tray = (\"{:02.0f}\").format(disk_location_info[0]),\n sys_tray_slot = (\"{:03.0f}\").format(disk_location_info[1])\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in DRIVE_PARAMS\n )\n )\n if CMD.showDriveMetrics:\n LOG.info(\"Drive payload: %s\", disk_item)\n json_body.append(disk_item)\n\n # Get interface statistics\n interface_stats_list = session.get((\"{}/{}/analysed-interface-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showInterfaceNames:\n for stats in interface_stats_list:\n LOG.info(stats[\"interfaceId\"])\n # Add interface statistics to json body\n for stats in interface_stats_list:\n if_item = dict(\n measurement = \"interface\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n interface_id = stats[\"interfaceId\"],\n channel_type = stats[\"channelType\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in INTERFACE_PARAMS\n )\n )\n if CMD.showInterfaceMetrics:\n LOG.info(\"Interface payload: %s\", if_item)\n json_body.append(if_item)\n\n # Get System statistics\n system_stats_list = session.get((\"{}/{}/analysed-system-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n # Add System statistics to json body\n sys_item = dict(\n measurement = \"systems\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name\n ),\n fields = dict(\n (metric, system_stats_list.get(metric)) for metric in SYSTEM_PARAMS\n )\n )\n if CMD.showSystemMetrics:\n LOG.info(\"System payload: %s\", sys_item)\n json_body.append(sys_item)\n \n # Get Volume statistics\n volume_stats_list = session.get((\"{}/{}/analysed-volume-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showVolumeNames:\n for stats in volume_stats_list:\n LOG.info(stats[\"volumeName\"])\n # Add Volume statistics to json body\n for stats in 
volume_stats_list:\n vol_item = dict(\n measurement = \"volumes\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n vol_name = stats[\"volumeName\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in VOLUME_PARAMS\n )\n )\n if CMD.showVolumeMetrics:\n LOG.info(\"Volume payload: %s\", vol_item)\n json_body.append(vol_item)\n\n if not CMD.doNotPost:\n client.write_points(json_body, database=INFLUXDB_DATABASE, time_precision=\"s\")\n\n except RuntimeError:\n LOG.error((\"Error when attempting to post statistics for {}/{}\").format(sys[\"name\"], sys[\"id\"]))", "def reset_used():\n with open(LOG_FILEPATH, 'w+') as logfile:\n pass", "def docker_logs():\n local('docker logs {} -f'.format(project_name))", "def test_slf_notastring():\n oldlogfile = get_logfile()\n start_logfile(1.0)\n start_logfile(True)\n set_logfile(oldlogfile)", "def set_disk_log_level(log_level):\r\n LogOptions._DISK_LOG_SCHEME, LogOptions._DISK_LOG_LEVEL = (\r\n LogOptions._parse_loglevel(log_level, scheme='google'))", "def test_slf_basic():\n oldlogfile = get_logfile()\n with tempfile.TemporaryDirectory() as tmp:\n logfile = os.path.join(tmp, 'log.txt')\n assert ~os.path.exists(logfile)\n start_logfile(logfile)\n assert os.path.exists(logfile)\n set_logfile(oldlogfile)", "def _performance_log(func):\n\n def wrapper(*arg):\n \"\"\" wrapper \"\"\"\n\n start = datetime.datetime.now()\n\n # Code execution\n res = func(*arg)\n\n if _log_performance:\n usage = resource.getrusage(resource.RUSAGE_SELF)\n memory_process = (usage[2])/1000\n\n delta = datetime.datetime.now() - start\n delta_milliseconds = int(delta.total_seconds() * 1000)\n\n _logger.info(\"PERFORMANCE - {0} - milliseconds |{1:>8,}| - memory MB |{2:>8,}|\"\n .format(func.__name__,\n delta_milliseconds,\n memory_process))\n\n return res\n\n return wrapper", "def log(length, file):\n\n if user_init.check_pre_init() and user_utility.check_drive_init() == 'True':\n \n data = user_utility.read_log(length, file)\n\n for log in data:\n print(log)\n\n\n else:\n user_utility.print_error(\"Sink folder not Found! 
Initialise folder first or reset your configuration!\")", "def logprllwrite(self):\n sql = '''select to_char(time_waited, 'FM99999999999999990') retvalue \n from v$system_event se, v$event_name en where se.event(+) \n = en.name and en.name = 'log files parallel write' '''\n self.cur.execute(sql)\n res = self.cur.fetchall()\n for i in res:\n print(i[0])", "def init_log():\n log_op = LogOp()\n log_op.config_log(logging.DEBUG)", "def logManager(self):\n time.sleep(0.1)\n while True:\n try:\n time.sleep(0.2)\n data = self.logQ.get(block=False)\n except Queue.Empty:\n pass\n else:\n try:\n self.log_lock.acquire() \n self.log_file.logEntry(data)\n time.sleep(0.1)\n self.log_lock.release()\n except:\n print '*Unable to write to log file*'", "def startLogFiles():\n #global techLog\n global userl\n _generic_startLogFiles(True)", "def _load_disk(self):\r\n pass", "def log_free_disk_space():\n cmd = 'df -h'\n p = Popen(cmd, shell=True, stdout=PIPE)\n res = p.communicate()\n if res[0]:\n res = res[0]\n else:\n res = res[1]\n logger.warning('Disk usage statisticks:')\n logger.warning(res)", "def test02StoreRefresh(self):\n s = utils.FastStore(max_size=5)\n keys = []\n for i in range(0, 5):\n keys.append(s.Put(i, i))\n\n # This should not raise because keys[0] should be refreshed each time its\n # gotten\n for i in range(0, 1000):\n s.Get(keys[0])\n s.Put(i, i)", "def log_time(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n start = time.time()\n result = func(*args, **kwargs)\n end = time.time()\n f = open(\"log_dev.txt\",'a',encoding=\"utf8\")\n time_res = end - start\n f.write(\"\\n\"+func.__name__+ \" time = \" + str(time_res))\n return result\n\n return wrapper", "def checkLogs():\n run(\"cat /etc/chariot/logs\")#not sure if this is cat-able", "def store(**kwargs):\r\n stored = AppLog(**kwargs)\r\n DBSession.add(stored)", "def test_compute_persistence(perfectModelEnsemble_initialized_control):\n perfectModelEnsemble_initialized_control._compute_persistence(metric=\"acc\")", "def collect_logs(self, log_dir, label=None, min_t=100, max_t=200):\n pass", "def test_backup_with_update_on_disk_of_snapshot_markers(self):\n version = RestConnection(self.backupset.backup_host).get_nodes_version()\n if version[:5] == \"6.5.0\":\n self.log.info(\"\\n\\n******* Due to issue in MB-36904, \\\n \\nthis test will be skipped in 6.5.0 ********\\n\")\n return\n gen1 = BlobGenerator(\"ent-backup1\", \"ent-backup-\", self.value_size, end=100000)\n gen2 = BlobGenerator(\"ent-backup2\", \"ent-backup-\", self.value_size, end=100000)\n gen3 = BlobGenerator(\"ent-backup3\", \"ent-backup-\", self.value_size, end=100000)\n rest_conn = RestConnection(self.backupset.cluster_host)\n rest_conn.create_bucket(bucket=\"bucket0\", ramQuotaMB=1024)\n self.buckets = rest_conn.get_buckets()\n authentication = \"-u Administrator -p password\"\n\n self._load_all_buckets(self.master, gen1, \"create\", 0)\n self.log.info(\"Stop persistent\")\n cluster_nodes = rest_conn.get_nodes()\n clusters = copy.deepcopy(cluster_nodes)\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n for node in clusters:\n shell.execute_command(\"%scbepctl%s %s:11210 -b %s stop %s\" % \\\n (self.cli_command_location,\n self.cmd_ext,\n node.ip,\n \"bucket0\",\n authentication))\n shell.disconnect()\n self.log.info(\"Load 2nd batch docs\")\n self._load_all_buckets(self.master, gen2, \"create\", 0)\n self.log.info(\"Run full backup with cbbackupwrapper\")\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n backup_dir = 
self.tmp_path + \"backup\" + self.master.ip\n shell.execute_command(\"rm -rf %s\" % backup_dir)\n shell.execute_command(\"mkdir %s\" % backup_dir)\n shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m full %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n self.log.info(\"Load 3rd batch docs\")\n self._load_all_buckets(self.master, gen3, \"create\", 0)\n self.log.info(\"Run diff backup with cbbackupwrapper\")\n output, _ = shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m diff %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n\n if output and \"SUCCESSFULLY COMPLETED\" not in output[1]:\n self.fail(\"Failed to backup as the fix in MB-25727\")\n shell.disconnect()", "def test_errors_on_log(self):\n mb = self.maria_backup\n self.assertFalse(mb.errors_on_log()) # at the moment, xtrabackup execution does not generate disk logs", "def add_log(self, log):\n log = str(datetime.datetime.now()) + \": \"+log+\"\\n\"\n print(log)\n self.logs.append(log)\n if len(self.logs) > 10:\n self.append_to_logfile()", "def log_scalar(name, value, step, autolog):\n if not autolog:\n mlflow.log_metric(name, value)", "def recordLogsToList(log):\n print log\n# global LOGLIST\n LOGLIST.append(log)", "def on_L1(self):\r\n self.log()", "def on_L2(self):\r\n self.log()", "def log_all(self):\n self.save_raw()\n self.log()", "def logs_directory(self):", "def test_log_links(self):\n self.host = synthetic_host(\"myserver-with-nids\", [Nid.Nid(\"192.168.0.1\", \"tcp\", 0)])\n self.create_simple_filesystem(self.host)\n fake_log_message(\"192.168.0.1@tcp testfs-MDT0000\")\n response = self.api_client.get(\"/api/log/\")\n (event,) = self.deserialize(response)[\"objects\"]\n self.assertEqual(len(event[\"substitutions\"]), 2)\n self.host.state = \"removed\"\n self.host.save()\n self.mdt.not_deleted = False\n self.mdt.save()\n response = self.api_client.get(\"/api/log/\")\n (event,) = self.deserialize(response)[\"objects\"]\n self.assertEqual(len(event[\"substitutions\"]), 0)", "def log(string):\n\n print string\n\n# data and time\n dt = datetime.now().strftime(\"%b %d %H:%M:%S\")\n\n# check if log file exist / if not create it\n check_logf = os.path.isfile(logfile)\n if check_logf == False:\n os.system(\"touch %s\" % (logfile))\n firstlog = \"%s %s jadm: jadm log file was created!\" % (dt, os.uname()[1])\n os.system(\"echo '%s' > %s\" % (firstlog, logfile))\n\n# applay string to log file\n string = \"%s %s jadm: %s\" % (dt, os.uname()[1], string)\n os.system(\"echo '%s' >> %s\" % (string, logfile))", "def test_data_store_local_index(self, tcex: TcEx):\n tcex.api.tc.v2.datastore('local', self.data_type)", "def createLogicalVolume(self, vg, filesystem, name, mountpoint, size):\n lv = {}\n lv['command'] = 'create:logvol'\n lv['vg'] = vg\n lv['fs'] = filesystem\n lv['size'] = size - EXTENT_SIZE + 1\n lv['name'] = name\n lv['mountPoint'] = mountpoint\n lv['format'] = 'yes'\n\n return lv", "def disk_log_scheme():\r\n if LogOptions._DISK_LOG_SCHEME is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_SCHEME", "def test_cbbackupmgr_collect_logs(self):\n if \"5.5\" > self.cb_version[:3]:\n self.fail(\"This test is only for cb version 5.5 and later. 
\")\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster()\n self._collect_logs()", "def log_runtime(label, mean_time, std, instances):\n pass", "def do_disk_event(client, args):\n args.type = 'disk'\n do_event_show(client, args)", "def show_bigdl_info_logs(bigdl_type=\"float\"):\n callBigDlFunc(bigdl_type, \"showBigDlInfoLogs\")", "def logs(self, **options):\n cmd = self.get_scalingo_prefix_cmd()\n cmd += \" logs -f\"\n for name, val in options.items():\n if len(name) == 1:\n cmd += f\" -{name}\"\n else:\n cmd += f\" --{name}\"\n if val:\n cmd += f\" {val}\"\n if self.env_name == \"local\":\n raise ValueError(\"No logs on local environment\")\n else:\n asyncio.run(self.async_run(cmd))", "def store_data(batch_df: DataFrame, output_delta_lake_path):\n batch_df.select(col(\"MeterId\"),\n col(\"SupplierId\"),\n col(\"Measurement\"),\n col(\"ObservationTime\")) \\\n .repartition(\"SupplierId\") \\\n .write \\\n .partitionBy(\"SupplierId\") \\\n .format(\"delta\") \\\n .mode(\"append\") \\\n .save(output_delta_lake_path)", "def logentry(jobid, label, typ, content=None, path=None):\n ud = str(uuid.uuid4())\n db.logs.save({\"uuid\":ud, \"jobid\":jobid, \"label\":label, \"type\":typ, \"content\":content, \"date\":tstamp(), \"containerurl\":path})", "def dump_to_log(self):\n # self._send_request(\"/dumpToLog\")\n pass", "def load(lb_id, backend='memory'):\n return b_api.fetch(backend).logbook_get(lb_id)", "def store(status, message, **kwargs):\r\n LogRecord(status, message, **kwargs)", "def log_mounts_info(mounts):\n if isinstance(mounts, str):\n mounts = [mounts]\n\n g.log.info(\"Start logging mounts information:\")\n for mount_obj in mounts:\n g.log.info(\"Information of mount %s:%s\", mount_obj.client_system,\n mount_obj.mountpoint)\n # Mount Info\n g.log.info(\"Look For Mountpoint:\\n\")\n cmd = \"mount | grep %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Disk Space Usage\n g.log.info(\"Disk Space Usage Of Mountpoint:\\n\")\n cmd = \"df -h %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Long list the mountpoint\n g.log.info(\"List Mountpoint Entries:\\n\")\n cmd = \"ls -ld %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Stat mountpoint\n g.log.info(\"Mountpoint Status:\\n\")\n cmd = \"stat %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)", "def putlog(self,s):\n if not self.logqueue == None:\n# print s\n self.logqueue.put(\"Spectrum: (\"+time.ctime()+\"):\\n\"+s)", "def set_size_based_rotating_log(log_path=LOGGER_PATH, log_name=LOGGER_FILENAME):\n\n # Checks if the path exists otherwise tries to create it\n if not os.path.exists(log_path):\n try:\n os.makedirs(log_path)\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n print(exc_type, exc_value, exc_traceback)\n\n # Sets the format and level of logging records\n format = '%(asctime)s - %(levelname)s - [%(pathname)s:%(lineno)s - %(funcName)10s() ] - %(message)s'\n formatter = Formatter(format)\n level=logging.DEBUG\n\n # Does basic configuration for the logging system\n logging.basicConfig(format=format, level=level)\n\n # Instantiates a logger object\n logger = logging.getLogger(\"\")\n\n handler = logging.handlers.RotatingFileHandler(filename=log_path+'/'+log_name,\n maxBytes=LOGGER_MAX_SIZE,\n backupCount=LOGGER_MAX_FILES)\n 
handler.setFormatter(formatter)\n handler.rotator = rotator\n handler.namer = namer\n logger.addHandler(handler)\n return logger", "def test_logging_log_rotate_for_mysql(\n self,\n admin_node,\n k8s_actions,\n show_step,\n os_deployed):\n logger.info('Log rotate for mysql')\n log_path = '/var/log/ccp/mysql/'\n log_file = 'mysql.log'\n\n show_step(1)\n # get cron pod\n cron_pod = [pod for pod in k8s_actions.api.pods.list(\n namespace=ext.Namespace.BASE_NAMESPACE)\n if 'cron-' in pod.name][0]\n # clean files\n utils.rm_files(admin_node, cron_pod, log_path + log_file + '*')\n\n show_step(2)\n for day in range(0, 8):\n utils.create_file(admin_node, cron_pod, log_path + log_file, 110)\n utils.run_daily_cron(admin_node, cron_pod, 'logrotate')\n sleep(5)\n\n show_step(3)\n log_files = utils.list_files(\n admin_node, cron_pod, log_path, log_file + '*')\n assert len(log_files) == 7,\\\n \"Count of log files after rotation is wrong. \" \\\n \"Expected {} Actual {}\".format(log_files, 7)", "def log(cmdDict, kind, error=None):\n\n cmdDict['timestamp'] = str(int(time.time() * 1000))\n cmdDict['logType'] = kind\n cmdDict['_id'] = cmdDict['transactionNumber'] + cmdDict['user'] + cmdDict['command'] + cmdDict['timestamp'] + cmdDict['logType']\n\n if (error != None):\n cmdDict['errorMessage'] = error\n\n try:\n Database.insert(TRANSACT_COLLECT, cmdDict)\n\n except pymongo.errors.PyMongoError as err:\n print(f\"ERROR! Could not log command into database. Failed with: {err}\")", "def upload_logs():\n return {\n 'page': 'upload_logs',\n 'raw_logs': '',\n }", "def logMemoryStats():\n class MemoryStatusEx(ctypes.Structure):\n \"\"\" MEMORYSTATUSEX \"\"\"\n kaFields = [\n ( 'dwLength', ctypes.c_ulong ),\n ( 'dwMemoryLoad', ctypes.c_ulong ),\n ( 'ullTotalPhys', ctypes.c_ulonglong ),\n ( 'ullAvailPhys', ctypes.c_ulonglong ),\n ( 'ullTotalPageFile', ctypes.c_ulonglong ),\n ( 'ullAvailPageFile', ctypes.c_ulonglong ),\n ( 'ullTotalVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailExtendedVirtual', ctypes.c_ulonglong ),\n ];\n _fields_ = kaFields; # pylint: disable=invalid-name\n\n def __init__(self):\n super(MemoryStatusEx, self).__init__();\n self.dwLength = ctypes.sizeof(self);\n\n try:\n oStats = MemoryStatusEx();\n ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(oStats));\n except:\n reporter.logXcpt();\n return False;\n\n reporter.log('Memory statistics:');\n for sField, _ in MemoryStatusEx.kaFields:\n reporter.log(' %32s: %s' % (sField, getattr(oStats, sField)));\n return True;", "def get_dismount_mount_record(r, start_time):\n retv = {'state': 'M',\n 'node': r['node'],\n 'volume': r['volume'],\n 'type': r['type'],\n 'logname': r['logname'],\n 'start': start_time,\n 'finish': timeutil.wind_time(start_time, seconds=20, backward=False),\n 'storage_group': 'PROBE_UTILITY',\n 'reads': 0,\n 'writes':0\n }\n return retv", "def azure_fetch_and_push_to_influx(token, azure_data_store, file_path, tag_map, influx_settings):\n df = read_df_from_azure(azure_data_store, file_path, token)\n (host, port, user, password, db_name, batch_size) = influx_settings\n #df=df.resample('1Min').mean()\n write_to_influx(df, tag_map, host, port, user, password, db_name, batch_size)", "def test_data_store_local_new_index(tcex: TcEx):\n data = {'one': 1}\n key = str(uuid.uuid4())\n rid = 'one'\n\n ds = tcex.api.tc.v2.datastore('local', key)\n\n results = ds.add(rid=rid, data=data)\n if results is None:\n assert False, 'datastore add returned None'\n assert results.get('_id') == rid\n assert 
results.get('_shards', {}).get('successful') == 1", "def test_metering_database(self):\n self.with_deletion = self.input.param(\"delete\", False)\n self.db_name = \"%s-testmetering\" % self.db_name\n # validate initial throughput is 5000/3 = 1666\n for bucket in self.cluster.buckets:\n print(bucket.servers)\n self.assertEqual(self.bucket_util.get_throttle_limit(bucket),\n self.bucket_throttling_limit)\n\n # validate create, update, delete stat\n for op_type in [\"create\", \"update\"]:\n if op_type == \"create\":\n self.load_data(create_start=0, create_end=self.num_items, create_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if op_type == \"update\":\n self.load_data(update_start=0, update_end=self.num_items, update_perc=100, mutated=1)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if self.with_deletion:\n self.log.info(\"performing delete operation\")\n self.load_data(delete_start=0, delete_end=self.num_items, delete_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)", "def on_L3(self):\r\n self.log()", "def send_logs(self):\n for i in range(30):\n with open('{}-{}.log'.format(self._log_file_path, i), 'a') as log_file:\n for _ in range(self._log_rate):\n log_file.write(self._log_record + '\\n')", "def fetch_data_logger(fn):\n @functools.wraps(fn)\n def wrapper():\n\n data = fn()\n\n create_or_update_log(data, LOG_TYPES['D'])\n\n return data\n return wrapper", "def list_log_files(fs, devices, start_times, verbose=True, passwords={}):\n import canedge_browser\n\n log_files = []\n\n if len(start_times):\n for idx, device in enumerate(devices):\n start = start_times[idx]\n log_files_device = canedge_browser.get_log_files(fs, [device], start_date=start, passwords=passwords)\n log_files.extend(log_files_device)\n\n if verbose:\n print(f\"Found {len(log_files)} log files\\n\")\n\n return log_files", "def main():\n global tar_file_descr\n\n help_msg = 'Usage: log_collector.py <all | host1[,host2,host3...]>'\n hosts = []\n if len(sys.argv) == 2:\n if '-h' == sys.argv[1] or '--help' == sys.argv[1]:\n print(help_msg)\n sys.exit(0)\n elif 'all' == sys.argv[1]:\n # get logs from all hosts\n hosts = []\n host_objs = CLIENT.host_get_all()\n for host_obj in host_objs:\n hosts.append(host_obj.name)\n else:\n # get logs from specified hosts\n hostnames = sys.argv[1].split(',')\n for host in hostnames:\n if host not in hosts:\n hosts.append(host)\n else:\n print(help_msg)\n sys.exit(1)\n\n # open tar file for storing logs\n fd, tar_path = tempfile.mkstemp(prefix='kolla_support_logs_',\n suffix='.tgz')\n os.close(fd) # avoid fd leak\n\n with tarfile.open(tar_path, 'w:gz') as tar_file_descr:\n # clear out old logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n os.mkdir(LOGDIR)\n\n # gather logs from selected hosts\n try:\n for host in hosts:\n get_logs_from_host(host)\n\n # tar up all the container logs\n tar_file_descr.add(LOGDIR, arcname='container_logs')\n finally:\n # remove uncompressed logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n\n # gather dump output from kolla-cli\n dump_kolla_info()\n\n print('Log collection complete. 
Logs are at %s' % tar_path)", "def log_store(self) -> Optional[str]:\n return pulumi.get(self, \"log_store\")", "def __enable_log_streaming():\n # Enable BigQuery service for the project:\n run_command('gcloud services enable bigquery --project {}'.format(PROJECT_ID))\n\n # Create the BigQuery dataset to store the logs:\n # For details refer to https://cloud.google.com/bigquery/docs/datasets#bigquery-create-dataset-cli\n run_command('bq mk --data_location {} --description \\\"Cloud logging export.\\\" {}'\n .format(LOGS_LOCATION, LOGS_SINK_DATASET_ID), 'already exists')\n\n # Set up a log sink to the above-created BigQuery dataset:\n output_message = run_command('gcloud logging sinks create {} {} --project {} \\\n --log-filter=\\'resource.type=\\\"gcs_bucket\\\" OR resource.type=\\\"project\\\"\\''\n .format(LOGS_SINK_NAME, LOGS_SINK_DESTINATION, PROJECT_ID))\n\n # The service account that will be writing to BQ dataset is listed by the previous command.\n # After extracting the service account from the message, you need to give it BQ Writer role to that service account.\n __set_dataset_access( __find_service_account_in_message(output_message) )", "def get_storage_info(self, address: str, level: StorageLevel):", "def local(self, text):\n\t\tlogf = open(\"update_log.txt\", \"a\")\n\t\tdate = datetime.datetime.now()\n\t\tlogf.writelines(\"[\" + str(date) + \"] \" + text + \"\\n\")\n\t\tlogf.close()", "def log_it(logdata=None):\n with open(\"bloomhack.log\", \"a\") as fp:\n fp.write(logdata)\n return", "def node_storage(self, node):\n def listdir(path, only_dirs=False):\n ents = node.account.sftp_client.listdir(path)\n if not only_dirs:\n return ents\n paths = map(lambda fn: (fn, os.path.join(path, fn)), ents)\n return [p[0] for p in paths if node.account.isdir(p[1])]\n\n store = NodeStorage(RedpandaService.DATA_DIR)\n for ns in listdir(store.data_dir, True):\n if ns == '.coprocessor_offset_checkpoints':\n continue\n ns = store.add_namespace(ns, os.path.join(store.data_dir, ns))\n for topic in listdir(ns.path):\n topic = ns.add_topic(topic, os.path.join(ns.path, topic))\n for num in listdir(topic.path):\n partition = topic.add_partition(\n num, node, os.path.join(topic.path, num))\n partition.add_files(listdir(partition.path))\n return store", "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))", "def snapshot(snapshot_type, result_q, time_delta):", "def _log_process(ip_or_url,\n username,\n password,\n database,\n series_name,\n port=8086,\n polling_interval=1,\n retention_duration=MEASUREMENTS_RETENTION_DURATION,\n **tags):\n logger = _logger()\n logger.info('Logging GPU to Database {}'.format(ip_or_url))\n\n kwargs = {'series_name': series_name,\n 'polling_interval': polling_interval,\n 'retention_duration': retention_duration}\n kwargs.update(tags)\n p = Process(target=_start_logger_process,\n args=(ip_or_url,\n port,\n username,\n password,\n database),\n kwargs=kwargs)\n p.start()\n yield p\n p.terminate()\n p.join()", "def test_start_new_log(self):\n old_log_file_name = self.device.log_file_name\n 
self.device.start_new_log(log_name_prefix=self.get_log_suffix())\n self.assertNotEqual(\n old_log_file_name, self.device.log_file_name,\n f\"Expected log file name to change from {old_log_file_name}\")\n self.assertTrue(\n os.path.exists(old_log_file_name),\n f\"Expected old log file name {old_log_file_name} to exist\")\n self.assertTrue(\n os.path.exists(self.device.log_file_name),\n f\"Expected new log file name {self.device.log_file_name} to exist\")" ]
[ "0.6248957", "0.6004453", "0.53603053", "0.53381205", "0.5311462", "0.5291663", "0.5282635", "0.5279799", "0.52787983", "0.5260151", "0.52150124", "0.5205798", "0.5137791", "0.5104536", "0.5094829", "0.5094829", "0.5089645", "0.5080881", "0.5069531", "0.5067983", "0.5049315", "0.5047344", "0.50428", "0.50336874", "0.5028792", "0.5028792", "0.50232273", "0.5017229", "0.50001866", "0.4975056", "0.49695352", "0.49651346", "0.4959732", "0.49485096", "0.49201605", "0.491775", "0.4906063", "0.4901353", "0.48973086", "0.48958024", "0.48914644", "0.48892727", "0.48823112", "0.48804468", "0.4877903", "0.48676512", "0.48655146", "0.48638356", "0.48596066", "0.48569125", "0.48556426", "0.48419136", "0.48412684", "0.48380032", "0.48372665", "0.48348084", "0.48249245", "0.48086706", "0.4807052", "0.4804408", "0.48027894", "0.47964686", "0.47765866", "0.4768357", "0.475868", "0.47576597", "0.474328", "0.4735613", "0.47350103", "0.47318107", "0.47237858", "0.47210956", "0.47131303", "0.47130087", "0.47083464", "0.47016108", "0.47003374", "0.4697505", "0.46934873", "0.46901193", "0.4685906", "0.46849328", "0.46755195", "0.46703953", "0.46671495", "0.4667117", "0.46545303", "0.46534565", "0.46448874", "0.46371803", "0.46359396", "0.4634748", "0.4631148", "0.46283913", "0.4627094", "0.46101946", "0.45972055", "0.45956996", "0.45937267", "0.45925382", "0.45902616" ]
0.0
-1
This operation is applicable only to general-purpose local-disk and dedicated local-disk instances. You can call this operation up to 30 times per minute. To call this operation at a higher frequency, use a Logstore. For more information, see [Manage a Logstore](~~48990~~).
async def describe_slow_log_records_with_options_async( self, request: dds_20151201_models.DescribeSlowLogRecordsRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.DescribeSlowLogRecordsResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.dbname): query['DBName'] = request.dbname if not UtilClient.is_unset(request.end_time): query['EndTime'] = request.end_time if not UtilClient.is_unset(request.node_id): query['NodeId'] = request.node_id if not UtilClient.is_unset(request.order_type): query['OrderType'] = request.order_type if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.page_number): query['PageNumber'] = request.page_number if not UtilClient.is_unset(request.page_size): query['PageSize'] = request.page_size if not UtilClient.is_unset(request.resource_group_id): query['ResourceGroupId'] = request.resource_group_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token if not UtilClient.is_unset(request.start_time): query['StartTime'] = request.start_time req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='DescribeSlowLogRecords', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.DescribeSlowLogRecordsResponse(), await self.call_api_async(params, req, runtime) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def monitorStore():\n # commented to use psutil system info system_info = systeminfo.get_all_info()\n\n system_info = nodeinfo.node_all()\n system_info ['monitored_timestamp'] = config.get_current_system_timestamp()\n\n # Attach sliver info to system info\n system_info.update(sliverinfo.collectAllDataAPI())\n\n s = shelve.open('log_shelf.db', writeback = True)\n\n while(1):\n try:\n try:\n if s.has_key('current_seq_number'):\n #Update current sequence number\n s['current_seq_number']+= 1\n current_seq = s['current_seq_number']\n else:\n current_seq = 1\n s['current_seq_number']= current_seq\n\n print(\"writing to file: \" + str(current_seq))\n\n # print(\"writing to file\" + str(system_info))\n s[str(current_seq)]= system_info\n\n\n finally:\n s.close()\n break\n\n except OSError:\n # In some research devices, the underlying dbm has a bug which needs to be handled explicitly\n print(\"Exception caught while handling shelve file!! OS Error: file not found. Trying again in 1 second\")\n time.sleep(1)\n continue\n\n delete_seen_entries()", "def log(msg):\n\n print('datastore: %s' % msg)", "def disable_disk_logging():\r\n app.set_option(_DISK_LOG_LEVEL_OPTION, LogOptions._LOG_LEVEL_NONE_KEY, force=True)", "def store_stream(data: DataStream):\n if data:\n try:\n c1 = datetime.now()\n CC.save_datastream(data,\"datastream\")\n e1 = datetime.now()\n CC.save_datastream_to_influxdb(data)\n i1 = datetime.now()\n print(\"Cassandra Time: \", e1-c1, \" Influx Time: \",i1-e1, \" Batch size: \",len(data.data))\n except:\n cc_log()", "def test_Drive_log_count(opencl_env: cldrive_env.OpenCLEnvironment):\n logs = opencl_kernel_driver.Drive(\n \"\"\"\nkernel void A(global int* a, global int* b) {\n a[get_global_id(0)] += b[get_global_id(0)];\n}\n\"\"\",\n 16,\n 16,\n opencl_env,\n 5,\n )\n assert len(logs) == 5", "def disk():\n run(env.disk_usage_command % env)", "def record_used(kind, hash):\n if os.path.exists(LOG_FILEPATH):\n log = open(os.path.join(ROOT, 'used'), 'a')\n else:\n log = open(os.path.join(ROOT, 'used'), 'w+')\n\n log.writelines([\"%s...%s\\n\" % (kind, hash)])", "def syslog_local_file(handle, admin_state, name, severity=\"emergencies\",\n size=\"40000\"):\n\n from ucsmsdk.mometa.comm.CommSyslogFile import CommSyslogFile\n\n mo = CommSyslogFile(parent_mo_or_dn=\"sys/svc-ext/syslog\", size=size,\n admin_state=admin_state,\n name=name,\n severity=severity)\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n return mo", "def check_oplog(con, warning, critical, perf_data):\n warning = warning or 24 \n critical = critical or 4\n try:\n db = con.local\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) \n if (db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) != None) :\n oplog = \"oplog.rs\";\n else :\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"})\n if (db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"}) != None) :\n oplog = \"oplog.$main\";\n else :\n message = \"neither master/slave nor replica set replication detected\";\n return check_levels(None,warning,critical,message)\n\n try:\n set_read_preference(con.admin)\n data=con.local.command(pymongo.son_manipulator.SON([('collstats',oplog)]))\n except:\n data = con.admin.command(son.SON([('collstats',oplog)]))\n\n ol_size=data['size']\n ol_storage_size=data['storageSize']\n ol_used_storage=int(float(ol_size)/ol_storage_size*100+1)\n ol=con.local[oplog]\n firstc = ol.find().sort(\"$natural\",pymongo.ASCENDING).limit(1)[0]['ts']\n lastc = 
ol.find().sort(\"$natural\",pymongo.DESCENDING).limit(1)[0]['ts']\n time_in_oplog= (lastc.as_datetime()-firstc.as_datetime())\n message=\"Oplog saves \"+ str(time_in_oplog) + \" %d%% used\" %ol_used_storage \n try: #work starting from python2.7\n hours_in_oplog= time_in_oplog.total_seconds()/60/60\n except:\n hours_in_oplog= float(time_in_oplog.seconds + time_in_oplog.days * 24 * 3600)/60/60\n approx_level=hours_in_oplog*100/ol_used_storage\n message+=performance_data(perf_data,[(\"%.2f\" % hours_in_oplog,'oplog_time',warning,critical),(\"%.2f \" % approx_level, 'oplog_time_100_percent_used')])\n return check_levels(-approx_level,-warning,-critical,message)\n\n except Exception, e:\n return exit_with_general_critical(e)", "def disk_log_level():\r\n if LogOptions._DISK_LOG_LEVEL is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_LEVEL", "def get_full_log(self):\n return self._get_log('full')", "def _record_storage_event(metric, value=0):\n command_name = properties.VALUES.metrics.command_name.Get()\n metrics.CustomKeyValue(command_name, 'Storage-' + metric, value)", "def get_logdb_quota():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<show><system><logdb-quota></logdb-quota></system></show>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def test_memory_pressure(self):\n self.execute_query(self.query)\n # This triggers a full GC as of openjdk 1.8.\n call([\"jmap\", \"-histo:live\", str(self.cluster.catalogd.get_pid())])\n # Sleep for logbufsecs=5 seconds to wait for the log to be flushed. Wait 5 more\n # seconds to reduce flakiness.\n time.sleep(10)\n assert self.metadata_cache_string not in self._get_catalog_object()", "def getLogs():", "def getLogs():", "def InsertLog():", "def GetLogs(self):\n raise NotImplementedError()", "def logmem(cls, tag):\n\n cls.logger.info(\"----%s----\", str(tag))\n mem = psutil.virtual_memory()\n cls.logger.info(\"total:%s M\", mem.total / 1024 / 1024)\n cls.logger.info(\"available:%s M\", mem.available / 1024 / 1024)\n cls.logger.info(\"used:%s M\", mem.used / 1024 / 1024)\n cls.logger.info(\"free:%s M\", mem.free / 1024 / 1024)\n cls.logger.info(\"active:%s M\", mem.active / 1024 / 1024)\n cls.logger.info(\"inactive:%s M\", mem.inactive / 1024 / 1024)\n cls.logger.info(\"buffers:%s M\", mem.buffers / 1024 / 1024)\n cls.logger.info(\"cached:%s M\", mem.cached / 1024 / 1024)\n cls.logger.info(\"shared:%s M\", mem.shared / 1024 / 1024)", "def upload_crashes(self, name, directory):\n logging.info('Not uploading crashes because no Filestore.')", "def logs(lines=50):\n run(f'journalctl --lines {lines} --unit addok --follow')", "def log(self, message):\n try:\n stat = os.stat(self.logpath)\n if stat.st_size >= 1000000:\n os.rename(self.logpath, self.logpath + '.1')\n except:\n pass\n logfile = open(self.logpath, 'a+')\n logfile.write(message + \"\\n\")\n logfile.close()", "def logs(name, namespace, region, start_time, end_time, offset, failed, tail):\n\n start, end = _align_time(start_time, end_time, offset, tail)\n client = ScfLogClient(name, namespace, region, failed)\n client.fetch_log(start, end, tail)", "def objectLogger(params):\n mode = params['mode']\n maxlen = params['maxlen']\n file_format = params['format']\n home_dir = params['storage_dir']\n server = Listener((params['ip'], int(params['port'])))\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n while True:\n conn = server.accept()\n while True:\n try:\n data 
= conn.recv()\n except EOFError:\n break\n if data:\n storage.append(data)\n else:\n storage.sync(id_generator())\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n conn.close()", "def _load_disk(self):", "def _load_disk(self):", "def redire_spark_logs(bigdl_type=\"float\", log_path=os.getcwd() + \"/bigdl.log\"):\n callBigDlFunc(bigdl_type, \"redirectSparkLogs\", log_path)", "def get_device_log(self, client):\r\n file_path = client.getDeviceLog()\r\n logging.info(str(time.asctime(time.localtime())) + \" Device Logs collected !! \" + file_path)\r\n #self.logger.info(\"Device logs collected at %s\" % file_path)\r\n return file_path", "def logs(self, container: Container) -> str:", "def log_store(self) -> str:\n return pulumi.get(self, \"log_store\")", "def on_sync(self):\r\n self.log()", "def collect_storage_metrics(sys):\n try:\n session = get_session()\n client = InfluxDBClient(host=INFLUXDB_HOSTNAME, port=INFLUXDB_PORT, database=INFLUXDB_DATABASE)\n\n sys_id = sys[\"id\"]\n sys_name = get_system_name(sys)\n\n json_body = list()\n\n # Get Drive statistics\n drive_stats_list = session.get((\"{}/{}/analysed-drive-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n drive_locations = get_drive_location(sys_id, session)\n if CMD.showDriveNames:\n for stats in drive_stats_list:\n location_send = drive_locations.get(stats[\"diskId\"])\n LOG.info((\"Tray{:02.0f}, Slot{:03.0f}\").format(location_send[0], location_send[1]))\n # Add Drive statistics to json body\n for stats in drive_stats_list:\n disk_location_info = drive_locations.get(stats[\"diskId\"])\n disk_item = dict(\n measurement = \"disks\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n sys_tray = (\"{:02.0f}\").format(disk_location_info[0]),\n sys_tray_slot = (\"{:03.0f}\").format(disk_location_info[1])\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in DRIVE_PARAMS\n )\n )\n if CMD.showDriveMetrics:\n LOG.info(\"Drive payload: %s\", disk_item)\n json_body.append(disk_item)\n\n # Get interface statistics\n interface_stats_list = session.get((\"{}/{}/analysed-interface-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showInterfaceNames:\n for stats in interface_stats_list:\n LOG.info(stats[\"interfaceId\"])\n # Add interface statistics to json body\n for stats in interface_stats_list:\n if_item = dict(\n measurement = \"interface\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n interface_id = stats[\"interfaceId\"],\n channel_type = stats[\"channelType\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in INTERFACE_PARAMS\n )\n )\n if CMD.showInterfaceMetrics:\n LOG.info(\"Interface payload: %s\", if_item)\n json_body.append(if_item)\n\n # Get System statistics\n system_stats_list = session.get((\"{}/{}/analysed-system-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n # Add System statistics to json body\n sys_item = dict(\n measurement = \"systems\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name\n ),\n fields = dict(\n (metric, system_stats_list.get(metric)) for metric in SYSTEM_PARAMS\n )\n )\n if CMD.showSystemMetrics:\n LOG.info(\"System payload: %s\", sys_item)\n json_body.append(sys_item)\n \n # Get Volume statistics\n volume_stats_list = session.get((\"{}/{}/analysed-volume-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showVolumeNames:\n for stats in volume_stats_list:\n LOG.info(stats[\"volumeName\"])\n # Add Volume statistics to json body\n for stats in 
volume_stats_list:\n vol_item = dict(\n measurement = \"volumes\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n vol_name = stats[\"volumeName\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in VOLUME_PARAMS\n )\n )\n if CMD.showVolumeMetrics:\n LOG.info(\"Volume payload: %s\", vol_item)\n json_body.append(vol_item)\n\n if not CMD.doNotPost:\n client.write_points(json_body, database=INFLUXDB_DATABASE, time_precision=\"s\")\n\n except RuntimeError:\n LOG.error((\"Error when attempting to post statistics for {}/{}\").format(sys[\"name\"], sys[\"id\"]))", "def reset_used():\n with open(LOG_FILEPATH, 'w+') as logfile:\n pass", "def docker_logs():\n local('docker logs {} -f'.format(project_name))", "def test_slf_notastring():\n oldlogfile = get_logfile()\n start_logfile(1.0)\n start_logfile(True)\n set_logfile(oldlogfile)", "def set_disk_log_level(log_level):\r\n LogOptions._DISK_LOG_SCHEME, LogOptions._DISK_LOG_LEVEL = (\r\n LogOptions._parse_loglevel(log_level, scheme='google'))", "def test_slf_basic():\n oldlogfile = get_logfile()\n with tempfile.TemporaryDirectory() as tmp:\n logfile = os.path.join(tmp, 'log.txt')\n assert ~os.path.exists(logfile)\n start_logfile(logfile)\n assert os.path.exists(logfile)\n set_logfile(oldlogfile)", "def _performance_log(func):\n\n def wrapper(*arg):\n \"\"\" wrapper \"\"\"\n\n start = datetime.datetime.now()\n\n # Code execution\n res = func(*arg)\n\n if _log_performance:\n usage = resource.getrusage(resource.RUSAGE_SELF)\n memory_process = (usage[2])/1000\n\n delta = datetime.datetime.now() - start\n delta_milliseconds = int(delta.total_seconds() * 1000)\n\n _logger.info(\"PERFORMANCE - {0} - milliseconds |{1:>8,}| - memory MB |{2:>8,}|\"\n .format(func.__name__,\n delta_milliseconds,\n memory_process))\n\n return res\n\n return wrapper", "def log(length, file):\n\n if user_init.check_pre_init() and user_utility.check_drive_init() == 'True':\n \n data = user_utility.read_log(length, file)\n\n for log in data:\n print(log)\n\n\n else:\n user_utility.print_error(\"Sink folder not Found! 
Initialise folder first or reset your configuration!\")", "def logprllwrite(self):\n sql = '''select to_char(time_waited, 'FM99999999999999990') retvalue \n from v$system_event se, v$event_name en where se.event(+) \n = en.name and en.name = 'log files parallel write' '''\n self.cur.execute(sql)\n res = self.cur.fetchall()\n for i in res:\n print(i[0])", "def init_log():\n log_op = LogOp()\n log_op.config_log(logging.DEBUG)", "def logManager(self):\n time.sleep(0.1)\n while True:\n try:\n time.sleep(0.2)\n data = self.logQ.get(block=False)\n except Queue.Empty:\n pass\n else:\n try:\n self.log_lock.acquire() \n self.log_file.logEntry(data)\n time.sleep(0.1)\n self.log_lock.release()\n except:\n print '*Unable to write to log file*'", "def _load_disk(self):\r\n pass", "def startLogFiles():\n #global techLog\n global userl\n _generic_startLogFiles(True)", "def log_free_disk_space():\n cmd = 'df -h'\n p = Popen(cmd, shell=True, stdout=PIPE)\n res = p.communicate()\n if res[0]:\n res = res[0]\n else:\n res = res[1]\n logger.warning('Disk usage statisticks:')\n logger.warning(res)", "def test02StoreRefresh(self):\n s = utils.FastStore(max_size=5)\n keys = []\n for i in range(0, 5):\n keys.append(s.Put(i, i))\n\n # This should not raise because keys[0] should be refreshed each time its\n # gotten\n for i in range(0, 1000):\n s.Get(keys[0])\n s.Put(i, i)", "def log_time(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n start = time.time()\n result = func(*args, **kwargs)\n end = time.time()\n f = open(\"log_dev.txt\",'a',encoding=\"utf8\")\n time_res = end - start\n f.write(\"\\n\"+func.__name__+ \" time = \" + str(time_res))\n return result\n\n return wrapper", "def checkLogs():\n run(\"cat /etc/chariot/logs\")#not sure if this is cat-able", "def store(**kwargs):\r\n stored = AppLog(**kwargs)\r\n DBSession.add(stored)", "def test_compute_persistence(perfectModelEnsemble_initialized_control):\n perfectModelEnsemble_initialized_control._compute_persistence(metric=\"acc\")", "def collect_logs(self, log_dir, label=None, min_t=100, max_t=200):\n pass", "def test_backup_with_update_on_disk_of_snapshot_markers(self):\n version = RestConnection(self.backupset.backup_host).get_nodes_version()\n if version[:5] == \"6.5.0\":\n self.log.info(\"\\n\\n******* Due to issue in MB-36904, \\\n \\nthis test will be skipped in 6.5.0 ********\\n\")\n return\n gen1 = BlobGenerator(\"ent-backup1\", \"ent-backup-\", self.value_size, end=100000)\n gen2 = BlobGenerator(\"ent-backup2\", \"ent-backup-\", self.value_size, end=100000)\n gen3 = BlobGenerator(\"ent-backup3\", \"ent-backup-\", self.value_size, end=100000)\n rest_conn = RestConnection(self.backupset.cluster_host)\n rest_conn.create_bucket(bucket=\"bucket0\", ramQuotaMB=1024)\n self.buckets = rest_conn.get_buckets()\n authentication = \"-u Administrator -p password\"\n\n self._load_all_buckets(self.master, gen1, \"create\", 0)\n self.log.info(\"Stop persistent\")\n cluster_nodes = rest_conn.get_nodes()\n clusters = copy.deepcopy(cluster_nodes)\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n for node in clusters:\n shell.execute_command(\"%scbepctl%s %s:11210 -b %s stop %s\" % \\\n (self.cli_command_location,\n self.cmd_ext,\n node.ip,\n \"bucket0\",\n authentication))\n shell.disconnect()\n self.log.info(\"Load 2nd batch docs\")\n self._load_all_buckets(self.master, gen2, \"create\", 0)\n self.log.info(\"Run full backup with cbbackupwrapper\")\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n backup_dir = 
self.tmp_path + \"backup\" + self.master.ip\n shell.execute_command(\"rm -rf %s\" % backup_dir)\n shell.execute_command(\"mkdir %s\" % backup_dir)\n shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m full %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n self.log.info(\"Load 3rd batch docs\")\n self._load_all_buckets(self.master, gen3, \"create\", 0)\n self.log.info(\"Run diff backup with cbbackupwrapper\")\n output, _ = shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m diff %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n\n if output and \"SUCCESSFULLY COMPLETED\" not in output[1]:\n self.fail(\"Failed to backup as the fix in MB-25727\")\n shell.disconnect()", "def test_errors_on_log(self):\n mb = self.maria_backup\n self.assertFalse(mb.errors_on_log()) # at the moment, xtrabackup execution does not generate disk logs", "def add_log(self, log):\n log = str(datetime.datetime.now()) + \": \"+log+\"\\n\"\n print(log)\n self.logs.append(log)\n if len(self.logs) > 10:\n self.append_to_logfile()", "def log_scalar(name, value, step, autolog):\n if not autolog:\n mlflow.log_metric(name, value)", "def recordLogsToList(log):\n print log\n# global LOGLIST\n LOGLIST.append(log)", "def on_L1(self):\r\n self.log()", "def on_L2(self):\r\n self.log()", "def log_all(self):\n self.save_raw()\n self.log()", "def logs_directory(self):", "def test_log_links(self):\n self.host = synthetic_host(\"myserver-with-nids\", [Nid.Nid(\"192.168.0.1\", \"tcp\", 0)])\n self.create_simple_filesystem(self.host)\n fake_log_message(\"192.168.0.1@tcp testfs-MDT0000\")\n response = self.api_client.get(\"/api/log/\")\n (event,) = self.deserialize(response)[\"objects\"]\n self.assertEqual(len(event[\"substitutions\"]), 2)\n self.host.state = \"removed\"\n self.host.save()\n self.mdt.not_deleted = False\n self.mdt.save()\n response = self.api_client.get(\"/api/log/\")\n (event,) = self.deserialize(response)[\"objects\"]\n self.assertEqual(len(event[\"substitutions\"]), 0)", "def log(string):\n\n print string\n\n# data and time\n dt = datetime.now().strftime(\"%b %d %H:%M:%S\")\n\n# check if log file exist / if not create it\n check_logf = os.path.isfile(logfile)\n if check_logf == False:\n os.system(\"touch %s\" % (logfile))\n firstlog = \"%s %s jadm: jadm log file was created!\" % (dt, os.uname()[1])\n os.system(\"echo '%s' > %s\" % (firstlog, logfile))\n\n# applay string to log file\n string = \"%s %s jadm: %s\" % (dt, os.uname()[1], string)\n os.system(\"echo '%s' >> %s\" % (string, logfile))", "def test_data_store_local_index(self, tcex: TcEx):\n tcex.api.tc.v2.datastore('local', self.data_type)", "def createLogicalVolume(self, vg, filesystem, name, mountpoint, size):\n lv = {}\n lv['command'] = 'create:logvol'\n lv['vg'] = vg\n lv['fs'] = filesystem\n lv['size'] = size - EXTENT_SIZE + 1\n lv['name'] = name\n lv['mountPoint'] = mountpoint\n lv['format'] = 'yes'\n\n return lv", "def test_cbbackupmgr_collect_logs(self):\n if \"5.5\" > self.cb_version[:3]:\n self.fail(\"This test is only for cb version 5.5 and later. 
\")\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster()\n self._collect_logs()", "def disk_log_scheme():\r\n if LogOptions._DISK_LOG_SCHEME is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_SCHEME", "def log_runtime(label, mean_time, std, instances):\n pass", "def do_disk_event(client, args):\n args.type = 'disk'\n do_event_show(client, args)", "def show_bigdl_info_logs(bigdl_type=\"float\"):\n callBigDlFunc(bigdl_type, \"showBigDlInfoLogs\")", "def logs(self, **options):\n cmd = self.get_scalingo_prefix_cmd()\n cmd += \" logs -f\"\n for name, val in options.items():\n if len(name) == 1:\n cmd += f\" -{name}\"\n else:\n cmd += f\" --{name}\"\n if val:\n cmd += f\" {val}\"\n if self.env_name == \"local\":\n raise ValueError(\"No logs on local environment\")\n else:\n asyncio.run(self.async_run(cmd))", "def store_data(batch_df: DataFrame, output_delta_lake_path):\n batch_df.select(col(\"MeterId\"),\n col(\"SupplierId\"),\n col(\"Measurement\"),\n col(\"ObservationTime\")) \\\n .repartition(\"SupplierId\") \\\n .write \\\n .partitionBy(\"SupplierId\") \\\n .format(\"delta\") \\\n .mode(\"append\") \\\n .save(output_delta_lake_path)", "def logentry(jobid, label, typ, content=None, path=None):\n ud = str(uuid.uuid4())\n db.logs.save({\"uuid\":ud, \"jobid\":jobid, \"label\":label, \"type\":typ, \"content\":content, \"date\":tstamp(), \"containerurl\":path})", "def load(lb_id, backend='memory'):\n return b_api.fetch(backend).logbook_get(lb_id)", "def dump_to_log(self):\n # self._send_request(\"/dumpToLog\")\n pass", "def store(status, message, **kwargs):\r\n LogRecord(status, message, **kwargs)", "def log_mounts_info(mounts):\n if isinstance(mounts, str):\n mounts = [mounts]\n\n g.log.info(\"Start logging mounts information:\")\n for mount_obj in mounts:\n g.log.info(\"Information of mount %s:%s\", mount_obj.client_system,\n mount_obj.mountpoint)\n # Mount Info\n g.log.info(\"Look For Mountpoint:\\n\")\n cmd = \"mount | grep %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Disk Space Usage\n g.log.info(\"Disk Space Usage Of Mountpoint:\\n\")\n cmd = \"df -h %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Long list the mountpoint\n g.log.info(\"List Mountpoint Entries:\\n\")\n cmd = \"ls -ld %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Stat mountpoint\n g.log.info(\"Mountpoint Status:\\n\")\n cmd = \"stat %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)", "def putlog(self,s):\n if not self.logqueue == None:\n# print s\n self.logqueue.put(\"Spectrum: (\"+time.ctime()+\"):\\n\"+s)", "def set_size_based_rotating_log(log_path=LOGGER_PATH, log_name=LOGGER_FILENAME):\n\n # Checks if the path exists otherwise tries to create it\n if not os.path.exists(log_path):\n try:\n os.makedirs(log_path)\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n print(exc_type, exc_value, exc_traceback)\n\n # Sets the format and level of logging records\n format = '%(asctime)s - %(levelname)s - [%(pathname)s:%(lineno)s - %(funcName)10s() ] - %(message)s'\n formatter = Formatter(format)\n level=logging.DEBUG\n\n # Does basic configuration for the logging system\n logging.basicConfig(format=format, level=level)\n\n # Instantiates a logger object\n logger 
= logging.getLogger(\"\")\n\n handler = logging.handlers.RotatingFileHandler(filename=log_path+'/'+log_name,\n maxBytes=LOGGER_MAX_SIZE,\n backupCount=LOGGER_MAX_FILES)\n handler.setFormatter(formatter)\n handler.rotator = rotator\n handler.namer = namer\n logger.addHandler(handler)\n return logger", "def test_logging_log_rotate_for_mysql(\n self,\n admin_node,\n k8s_actions,\n show_step,\n os_deployed):\n logger.info('Log rotate for mysql')\n log_path = '/var/log/ccp/mysql/'\n log_file = 'mysql.log'\n\n show_step(1)\n # get cron pod\n cron_pod = [pod for pod in k8s_actions.api.pods.list(\n namespace=ext.Namespace.BASE_NAMESPACE)\n if 'cron-' in pod.name][0]\n # clean files\n utils.rm_files(admin_node, cron_pod, log_path + log_file + '*')\n\n show_step(2)\n for day in range(0, 8):\n utils.create_file(admin_node, cron_pod, log_path + log_file, 110)\n utils.run_daily_cron(admin_node, cron_pod, 'logrotate')\n sleep(5)\n\n show_step(3)\n log_files = utils.list_files(\n admin_node, cron_pod, log_path, log_file + '*')\n assert len(log_files) == 7,\\\n \"Count of log files after rotation is wrong. \" \\\n \"Expected {} Actual {}\".format(log_files, 7)", "def log(cmdDict, kind, error=None):\n\n cmdDict['timestamp'] = str(int(time.time() * 1000))\n cmdDict['logType'] = kind\n cmdDict['_id'] = cmdDict['transactionNumber'] + cmdDict['user'] + cmdDict['command'] + cmdDict['timestamp'] + cmdDict['logType']\n\n if (error != None):\n cmdDict['errorMessage'] = error\n\n try:\n Database.insert(TRANSACT_COLLECT, cmdDict)\n\n except pymongo.errors.PyMongoError as err:\n print(f\"ERROR! Could not log command into database. Failed with: {err}\")", "def upload_logs():\n return {\n 'page': 'upload_logs',\n 'raw_logs': '',\n }", "def logMemoryStats():\n class MemoryStatusEx(ctypes.Structure):\n \"\"\" MEMORYSTATUSEX \"\"\"\n kaFields = [\n ( 'dwLength', ctypes.c_ulong ),\n ( 'dwMemoryLoad', ctypes.c_ulong ),\n ( 'ullTotalPhys', ctypes.c_ulonglong ),\n ( 'ullAvailPhys', ctypes.c_ulonglong ),\n ( 'ullTotalPageFile', ctypes.c_ulonglong ),\n ( 'ullAvailPageFile', ctypes.c_ulonglong ),\n ( 'ullTotalVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailExtendedVirtual', ctypes.c_ulonglong ),\n ];\n _fields_ = kaFields; # pylint: disable=invalid-name\n\n def __init__(self):\n super(MemoryStatusEx, self).__init__();\n self.dwLength = ctypes.sizeof(self);\n\n try:\n oStats = MemoryStatusEx();\n ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(oStats));\n except:\n reporter.logXcpt();\n return False;\n\n reporter.log('Memory statistics:');\n for sField, _ in MemoryStatusEx.kaFields:\n reporter.log(' %32s: %s' % (sField, getattr(oStats, sField)));\n return True;", "def get_dismount_mount_record(r, start_time):\n retv = {'state': 'M',\n 'node': r['node'],\n 'volume': r['volume'],\n 'type': r['type'],\n 'logname': r['logname'],\n 'start': start_time,\n 'finish': timeutil.wind_time(start_time, seconds=20, backward=False),\n 'storage_group': 'PROBE_UTILITY',\n 'reads': 0,\n 'writes':0\n }\n return retv", "def azure_fetch_and_push_to_influx(token, azure_data_store, file_path, tag_map, influx_settings):\n df = read_df_from_azure(azure_data_store, file_path, token)\n (host, port, user, password, db_name, batch_size) = influx_settings\n #df=df.resample('1Min').mean()\n write_to_influx(df, tag_map, host, port, user, password, db_name, batch_size)", "def test_metering_database(self):\n self.with_deletion = self.input.param(\"delete\", False)\n self.db_name = \"%s-testmetering\" % 
self.db_name\n # validate initial throughput is 5000/3 = 1666\n for bucket in self.cluster.buckets:\n print(bucket.servers)\n self.assertEqual(self.bucket_util.get_throttle_limit(bucket),\n self.bucket_throttling_limit)\n\n # validate create, update, delete stat\n for op_type in [\"create\", \"update\"]:\n if op_type == \"create\":\n self.load_data(create_start=0, create_end=self.num_items, create_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if op_type == \"update\":\n self.load_data(update_start=0, update_end=self.num_items, update_perc=100, mutated=1)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if self.with_deletion:\n self.log.info(\"performing delete operation\")\n self.load_data(delete_start=0, delete_end=self.num_items, delete_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)", "def test_data_store_local_new_index(tcex: TcEx):\n data = {'one': 1}\n key = str(uuid.uuid4())\n rid = 'one'\n\n ds = tcex.api.tc.v2.datastore('local', key)\n\n results = ds.add(rid=rid, data=data)\n if results is None:\n assert False, 'datastore add returned None'\n assert results.get('_id') == rid\n assert results.get('_shards', {}).get('successful') == 1", "def send_logs(self):\n for i in range(30):\n with open('{}-{}.log'.format(self._log_file_path, i), 'a') as log_file:\n for _ in range(self._log_rate):\n log_file.write(self._log_record + '\\n')", "def on_L3(self):\r\n self.log()", "def fetch_data_logger(fn):\n @functools.wraps(fn)\n def wrapper():\n\n data = fn()\n\n create_or_update_log(data, LOG_TYPES['D'])\n\n return data\n return wrapper", "def list_log_files(fs, devices, start_times, verbose=True, passwords={}):\n import canedge_browser\n\n log_files = []\n\n if len(start_times):\n for idx, device in enumerate(devices):\n start = start_times[idx]\n log_files_device = canedge_browser.get_log_files(fs, [device], start_date=start, passwords=passwords)\n log_files.extend(log_files_device)\n\n if verbose:\n print(f\"Found {len(log_files)} log files\\n\")\n\n return log_files", "def main():\n global tar_file_descr\n\n help_msg = 'Usage: log_collector.py <all | host1[,host2,host3...]>'\n hosts = []\n if len(sys.argv) == 2:\n if '-h' == sys.argv[1] or '--help' == sys.argv[1]:\n print(help_msg)\n sys.exit(0)\n elif 'all' == sys.argv[1]:\n # get logs from all hosts\n hosts = []\n host_objs = CLIENT.host_get_all()\n for host_obj in host_objs:\n hosts.append(host_obj.name)\n else:\n # get logs from specified hosts\n hostnames = sys.argv[1].split(',')\n for host in hostnames:\n if host not in hosts:\n hosts.append(host)\n else:\n print(help_msg)\n sys.exit(1)\n\n # open tar file for storing logs\n fd, tar_path = tempfile.mkstemp(prefix='kolla_support_logs_',\n suffix='.tgz')\n os.close(fd) # avoid fd leak\n\n with tarfile.open(tar_path, 'w:gz') as tar_file_descr:\n # clear out old logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n os.mkdir(LOGDIR)\n\n # gather logs from selected hosts\n try:\n for host in hosts:\n get_logs_from_host(host)\n\n # tar up all the container logs\n tar_file_descr.add(LOGDIR, arcname='container_logs')\n finally:\n # remove uncompressed logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n\n # gather dump output from kolla-cli\n dump_kolla_info()\n\n print('Log collection complete. 
Logs are at %s' % tar_path)", "def log_store(self) -> Optional[str]:\n return pulumi.get(self, \"log_store\")", "def __enable_log_streaming():\n # Enable BigQuery service for the project:\n run_command('gcloud services enable bigquery --project {}'.format(PROJECT_ID))\n\n # Create the BigQuery dataset to store the logs:\n # For details refer to https://cloud.google.com/bigquery/docs/datasets#bigquery-create-dataset-cli\n run_command('bq mk --data_location {} --description \\\"Cloud logging export.\\\" {}'\n .format(LOGS_LOCATION, LOGS_SINK_DATASET_ID), 'already exists')\n\n # Set up a log sink to the above-created BigQuery dataset:\n output_message = run_command('gcloud logging sinks create {} {} --project {} \\\n --log-filter=\\'resource.type=\\\"gcs_bucket\\\" OR resource.type=\\\"project\\\"\\''\n .format(LOGS_SINK_NAME, LOGS_SINK_DESTINATION, PROJECT_ID))\n\n # The service account that will be writing to BQ dataset is listed by the previous command.\n # After extracting the service account from the message, you need to give it BQ Writer role to that service account.\n __set_dataset_access( __find_service_account_in_message(output_message) )", "def get_storage_info(self, address: str, level: StorageLevel):", "def local(self, text):\n\t\tlogf = open(\"update_log.txt\", \"a\")\n\t\tdate = datetime.datetime.now()\n\t\tlogf.writelines(\"[\" + str(date) + \"] \" + text + \"\\n\")\n\t\tlogf.close()", "def log_it(logdata=None):\n with open(\"bloomhack.log\", \"a\") as fp:\n fp.write(logdata)\n return", "def node_storage(self, node):\n def listdir(path, only_dirs=False):\n ents = node.account.sftp_client.listdir(path)\n if not only_dirs:\n return ents\n paths = map(lambda fn: (fn, os.path.join(path, fn)), ents)\n return [p[0] for p in paths if node.account.isdir(p[1])]\n\n store = NodeStorage(RedpandaService.DATA_DIR)\n for ns in listdir(store.data_dir, True):\n if ns == '.coprocessor_offset_checkpoints':\n continue\n ns = store.add_namespace(ns, os.path.join(store.data_dir, ns))\n for topic in listdir(ns.path):\n topic = ns.add_topic(topic, os.path.join(ns.path, topic))\n for num in listdir(topic.path):\n partition = topic.add_partition(\n num, node, os.path.join(topic.path, num))\n partition.add_files(listdir(partition.path))\n return store", "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))", "def snapshot(snapshot_type, result_q, time_delta):", "def _log_process(ip_or_url,\n username,\n password,\n database,\n series_name,\n port=8086,\n polling_interval=1,\n retention_duration=MEASUREMENTS_RETENTION_DURATION,\n **tags):\n logger = _logger()\n logger.info('Logging GPU to Database {}'.format(ip_or_url))\n\n kwargs = {'series_name': series_name,\n 'polling_interval': polling_interval,\n 'retention_duration': retention_duration}\n kwargs.update(tags)\n p = Process(target=_start_logger_process,\n args=(ip_or_url,\n port,\n username,\n password,\n database),\n kwargs=kwargs)\n p.start()\n yield p\n p.terminate()\n p.join()", "def test_start_new_log(self):\n old_log_file_name = self.device.log_file_name\n 
self.device.start_new_log(log_name_prefix=self.get_log_suffix())\n self.assertNotEqual(\n old_log_file_name, self.device.log_file_name,\n f\"Expected log file name to change from {old_log_file_name}\")\n self.assertTrue(\n os.path.exists(old_log_file_name),\n f\"Expected old log file name {old_log_file_name} to exist\")\n self.assertTrue(\n os.path.exists(self.device.log_file_name),\n f\"Expected new log file name {self.device.log_file_name} to exist\")" ]
[ "0.6249917", "0.600348", "0.5359398", "0.5338779", "0.5310546", "0.52928746", "0.5282921", "0.5279352", "0.5279001", "0.52591884", "0.521372", "0.52056307", "0.5136519", "0.5106076", "0.5093585", "0.5093585", "0.508893", "0.50791097", "0.507007", "0.5068071", "0.50492823", "0.5047607", "0.5041982", "0.5033806", "0.50300545", "0.50300545", "0.50220966", "0.50161654", "0.49987411", "0.4973591", "0.49686015", "0.4965938", "0.49597555", "0.4947962", "0.4918784", "0.4917084", "0.49052343", "0.49005905", "0.48973042", "0.48950753", "0.48905793", "0.48889422", "0.48816362", "0.48815823", "0.48786694", "0.48686954", "0.4864698", "0.48646566", "0.4858941", "0.48564273", "0.485455", "0.4843747", "0.48412818", "0.48370776", "0.48358908", "0.4833253", "0.48229158", "0.4806377", "0.4805858", "0.48033783", "0.48021865", "0.47964823", "0.47750932", "0.4768405", "0.4758163", "0.4757573", "0.47427362", "0.4735837", "0.4733725", "0.47297028", "0.47232515", "0.47209674", "0.47128278", "0.47117206", "0.47072574", "0.47022194", "0.46999288", "0.46977976", "0.46937203", "0.46901488", "0.4685285", "0.4685246", "0.46755475", "0.4670844", "0.46674314", "0.46657613", "0.4653522", "0.46523577", "0.46428913", "0.46363285", "0.46357659", "0.46331036", "0.46296275", "0.46278575", "0.4626488", "0.46094543", "0.45972398", "0.45966548", "0.45935205", "0.459188", "0.4590052" ]
0.0
-1
This operation is applicable only to general-purpose local-disk and dedicated local-disk instances. You can call this operation up to 30 times per minute. To call this operation at a higher frequency, use a Logstore. For more information, see [Manage a Logstore](~~48990~~).
def describe_slow_log_records( self, request: dds_20151201_models.DescribeSlowLogRecordsRequest, ) -> dds_20151201_models.DescribeSlowLogRecordsResponse: runtime = util_models.RuntimeOptions() return self.describe_slow_log_records_with_options(request, runtime)
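A minimal usage sketch for the wrapper above, assuming a client instance built elsewhere; the import path and the dbinstance_id/start_time/end_time request fields are assumptions based on the generated-SDK conventions visible in this file, not taken from this row.

# Hypothetical usage sketch: the import path and the request fields
# (dbinstance_id, start_time, end_time) are assumed from the generated-SDK
# conventions in this file, not confirmed by this dataset row.
from alibabacloud_dds20151201 import models as dds_20151201_models


def fetch_slow_logs(client, instance_id: str):
    request = dds_20151201_models.DescribeSlowLogRecordsRequest(
        dbinstance_id=instance_id,           # assumed instance identifier field
        start_time='2023-01-01T00:00Z',      # assumed time-range fields
        end_time='2023-01-02T00:00Z',
    )
    # The wrapper builds default RuntimeOptions and delegates to
    # describe_slow_log_records_with_options; keep call frequency within the
    # documented 30-calls-per-minute limit.
    return client.describe_slow_log_records(request)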
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def monitorStore():\n # commented to use psutil system info system_info = systeminfo.get_all_info()\n\n system_info = nodeinfo.node_all()\n system_info ['monitored_timestamp'] = config.get_current_system_timestamp()\n\n # Attach sliver info to system info\n system_info.update(sliverinfo.collectAllDataAPI())\n\n s = shelve.open('log_shelf.db', writeback = True)\n\n while(1):\n try:\n try:\n if s.has_key('current_seq_number'):\n #Update current sequence number\n s['current_seq_number']+= 1\n current_seq = s['current_seq_number']\n else:\n current_seq = 1\n s['current_seq_number']= current_seq\n\n print(\"writing to file: \" + str(current_seq))\n\n # print(\"writing to file\" + str(system_info))\n s[str(current_seq)]= system_info\n\n\n finally:\n s.close()\n break\n\n except OSError:\n # In some research devices, the underlying dbm has a bug which needs to be handled explicitly\n print(\"Exception caught while handling shelve file!! OS Error: file not found. Trying again in 1 second\")\n time.sleep(1)\n continue\n\n delete_seen_entries()", "def log(msg):\n\n print('datastore: %s' % msg)", "def disable_disk_logging():\r\n app.set_option(_DISK_LOG_LEVEL_OPTION, LogOptions._LOG_LEVEL_NONE_KEY, force=True)", "def store_stream(data: DataStream):\n if data:\n try:\n c1 = datetime.now()\n CC.save_datastream(data,\"datastream\")\n e1 = datetime.now()\n CC.save_datastream_to_influxdb(data)\n i1 = datetime.now()\n print(\"Cassandra Time: \", e1-c1, \" Influx Time: \",i1-e1, \" Batch size: \",len(data.data))\n except:\n cc_log()", "def test_Drive_log_count(opencl_env: cldrive_env.OpenCLEnvironment):\n logs = opencl_kernel_driver.Drive(\n \"\"\"\nkernel void A(global int* a, global int* b) {\n a[get_global_id(0)] += b[get_global_id(0)];\n}\n\"\"\",\n 16,\n 16,\n opencl_env,\n 5,\n )\n assert len(logs) == 5", "def disk():\n run(env.disk_usage_command % env)", "def record_used(kind, hash):\n if os.path.exists(LOG_FILEPATH):\n log = open(os.path.join(ROOT, 'used'), 'a')\n else:\n log = open(os.path.join(ROOT, 'used'), 'w+')\n\n log.writelines([\"%s...%s\\n\" % (kind, hash)])", "def syslog_local_file(handle, admin_state, name, severity=\"emergencies\",\n size=\"40000\"):\n\n from ucsmsdk.mometa.comm.CommSyslogFile import CommSyslogFile\n\n mo = CommSyslogFile(parent_mo_or_dn=\"sys/svc-ext/syslog\", size=size,\n admin_state=admin_state,\n name=name,\n severity=severity)\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n return mo", "def check_oplog(con, warning, critical, perf_data):\n warning = warning or 24 \n critical = critical or 4\n try:\n db = con.local\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) \n if (db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) != None) :\n oplog = \"oplog.rs\";\n else :\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"})\n if (db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"}) != None) :\n oplog = \"oplog.$main\";\n else :\n message = \"neither master/slave nor replica set replication detected\";\n return check_levels(None,warning,critical,message)\n\n try:\n set_read_preference(con.admin)\n data=con.local.command(pymongo.son_manipulator.SON([('collstats',oplog)]))\n except:\n data = con.admin.command(son.SON([('collstats',oplog)]))\n\n ol_size=data['size']\n ol_storage_size=data['storageSize']\n ol_used_storage=int(float(ol_size)/ol_storage_size*100+1)\n ol=con.local[oplog]\n firstc = ol.find().sort(\"$natural\",pymongo.ASCENDING).limit(1)[0]['ts']\n lastc = 
ol.find().sort(\"$natural\",pymongo.DESCENDING).limit(1)[0]['ts']\n time_in_oplog= (lastc.as_datetime()-firstc.as_datetime())\n message=\"Oplog saves \"+ str(time_in_oplog) + \" %d%% used\" %ol_used_storage \n try: #work starting from python2.7\n hours_in_oplog= time_in_oplog.total_seconds()/60/60\n except:\n hours_in_oplog= float(time_in_oplog.seconds + time_in_oplog.days * 24 * 3600)/60/60\n approx_level=hours_in_oplog*100/ol_used_storage\n message+=performance_data(perf_data,[(\"%.2f\" % hours_in_oplog,'oplog_time',warning,critical),(\"%.2f \" % approx_level, 'oplog_time_100_percent_used')])\n return check_levels(-approx_level,-warning,-critical,message)\n\n except Exception, e:\n return exit_with_general_critical(e)", "def disk_log_level():\r\n if LogOptions._DISK_LOG_LEVEL is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_LEVEL", "def get_full_log(self):\n return self._get_log('full')", "def _record_storage_event(metric, value=0):\n command_name = properties.VALUES.metrics.command_name.Get()\n metrics.CustomKeyValue(command_name, 'Storage-' + metric, value)", "def get_logdb_quota():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<show><system><logdb-quota></logdb-quota></system></show>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def test_memory_pressure(self):\n self.execute_query(self.query)\n # This triggers a full GC as of openjdk 1.8.\n call([\"jmap\", \"-histo:live\", str(self.cluster.catalogd.get_pid())])\n # Sleep for logbufsecs=5 seconds to wait for the log to be flushed. Wait 5 more\n # seconds to reduce flakiness.\n time.sleep(10)\n assert self.metadata_cache_string not in self._get_catalog_object()", "def getLogs():", "def getLogs():", "def InsertLog():", "def GetLogs(self):\n raise NotImplementedError()", "def logmem(cls, tag):\n\n cls.logger.info(\"----%s----\", str(tag))\n mem = psutil.virtual_memory()\n cls.logger.info(\"total:%s M\", mem.total / 1024 / 1024)\n cls.logger.info(\"available:%s M\", mem.available / 1024 / 1024)\n cls.logger.info(\"used:%s M\", mem.used / 1024 / 1024)\n cls.logger.info(\"free:%s M\", mem.free / 1024 / 1024)\n cls.logger.info(\"active:%s M\", mem.active / 1024 / 1024)\n cls.logger.info(\"inactive:%s M\", mem.inactive / 1024 / 1024)\n cls.logger.info(\"buffers:%s M\", mem.buffers / 1024 / 1024)\n cls.logger.info(\"cached:%s M\", mem.cached / 1024 / 1024)\n cls.logger.info(\"shared:%s M\", mem.shared / 1024 / 1024)", "def upload_crashes(self, name, directory):\n logging.info('Not uploading crashes because no Filestore.')", "def logs(lines=50):\n run(f'journalctl --lines {lines} --unit addok --follow')", "def log(self, message):\n try:\n stat = os.stat(self.logpath)\n if stat.st_size >= 1000000:\n os.rename(self.logpath, self.logpath + '.1')\n except:\n pass\n logfile = open(self.logpath, 'a+')\n logfile.write(message + \"\\n\")\n logfile.close()", "def logs(name, namespace, region, start_time, end_time, offset, failed, tail):\n\n start, end = _align_time(start_time, end_time, offset, tail)\n client = ScfLogClient(name, namespace, region, failed)\n client.fetch_log(start, end, tail)", "def objectLogger(params):\n mode = params['mode']\n maxlen = params['maxlen']\n file_format = params['format']\n home_dir = params['storage_dir']\n server = Listener((params['ip'], int(params['port'])))\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n while True:\n conn = server.accept()\n while True:\n try:\n data 
= conn.recv()\n except EOFError:\n break\n if data:\n storage.append(data)\n else:\n storage.sync(id_generator())\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n conn.close()", "def _load_disk(self):", "def _load_disk(self):", "def redire_spark_logs(bigdl_type=\"float\", log_path=os.getcwd() + \"/bigdl.log\"):\n callBigDlFunc(bigdl_type, \"redirectSparkLogs\", log_path)", "def get_device_log(self, client):\r\n file_path = client.getDeviceLog()\r\n logging.info(str(time.asctime(time.localtime())) + \" Device Logs collected !! \" + file_path)\r\n #self.logger.info(\"Device logs collected at %s\" % file_path)\r\n return file_path", "def logs(self, container: Container) -> str:", "def log_store(self) -> str:\n return pulumi.get(self, \"log_store\")", "def on_sync(self):\r\n self.log()", "def collect_storage_metrics(sys):\n try:\n session = get_session()\n client = InfluxDBClient(host=INFLUXDB_HOSTNAME, port=INFLUXDB_PORT, database=INFLUXDB_DATABASE)\n\n sys_id = sys[\"id\"]\n sys_name = get_system_name(sys)\n\n json_body = list()\n\n # Get Drive statistics\n drive_stats_list = session.get((\"{}/{}/analysed-drive-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n drive_locations = get_drive_location(sys_id, session)\n if CMD.showDriveNames:\n for stats in drive_stats_list:\n location_send = drive_locations.get(stats[\"diskId\"])\n LOG.info((\"Tray{:02.0f}, Slot{:03.0f}\").format(location_send[0], location_send[1]))\n # Add Drive statistics to json body\n for stats in drive_stats_list:\n disk_location_info = drive_locations.get(stats[\"diskId\"])\n disk_item = dict(\n measurement = \"disks\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n sys_tray = (\"{:02.0f}\").format(disk_location_info[0]),\n sys_tray_slot = (\"{:03.0f}\").format(disk_location_info[1])\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in DRIVE_PARAMS\n )\n )\n if CMD.showDriveMetrics:\n LOG.info(\"Drive payload: %s\", disk_item)\n json_body.append(disk_item)\n\n # Get interface statistics\n interface_stats_list = session.get((\"{}/{}/analysed-interface-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showInterfaceNames:\n for stats in interface_stats_list:\n LOG.info(stats[\"interfaceId\"])\n # Add interface statistics to json body\n for stats in interface_stats_list:\n if_item = dict(\n measurement = \"interface\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n interface_id = stats[\"interfaceId\"],\n channel_type = stats[\"channelType\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in INTERFACE_PARAMS\n )\n )\n if CMD.showInterfaceMetrics:\n LOG.info(\"Interface payload: %s\", if_item)\n json_body.append(if_item)\n\n # Get System statistics\n system_stats_list = session.get((\"{}/{}/analysed-system-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n # Add System statistics to json body\n sys_item = dict(\n measurement = \"systems\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name\n ),\n fields = dict(\n (metric, system_stats_list.get(metric)) for metric in SYSTEM_PARAMS\n )\n )\n if CMD.showSystemMetrics:\n LOG.info(\"System payload: %s\", sys_item)\n json_body.append(sys_item)\n \n # Get Volume statistics\n volume_stats_list = session.get((\"{}/{}/analysed-volume-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showVolumeNames:\n for stats in volume_stats_list:\n LOG.info(stats[\"volumeName\"])\n # Add Volume statistics to json body\n for stats in 
volume_stats_list:\n vol_item = dict(\n measurement = \"volumes\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n vol_name = stats[\"volumeName\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in VOLUME_PARAMS\n )\n )\n if CMD.showVolumeMetrics:\n LOG.info(\"Volume payload: %s\", vol_item)\n json_body.append(vol_item)\n\n if not CMD.doNotPost:\n client.write_points(json_body, database=INFLUXDB_DATABASE, time_precision=\"s\")\n\n except RuntimeError:\n LOG.error((\"Error when attempting to post statistics for {}/{}\").format(sys[\"name\"], sys[\"id\"]))", "def reset_used():\n with open(LOG_FILEPATH, 'w+') as logfile:\n pass", "def docker_logs():\n local('docker logs {} -f'.format(project_name))", "def test_slf_notastring():\n oldlogfile = get_logfile()\n start_logfile(1.0)\n start_logfile(True)\n set_logfile(oldlogfile)", "def set_disk_log_level(log_level):\r\n LogOptions._DISK_LOG_SCHEME, LogOptions._DISK_LOG_LEVEL = (\r\n LogOptions._parse_loglevel(log_level, scheme='google'))", "def test_slf_basic():\n oldlogfile = get_logfile()\n with tempfile.TemporaryDirectory() as tmp:\n logfile = os.path.join(tmp, 'log.txt')\n assert ~os.path.exists(logfile)\n start_logfile(logfile)\n assert os.path.exists(logfile)\n set_logfile(oldlogfile)", "def _performance_log(func):\n\n def wrapper(*arg):\n \"\"\" wrapper \"\"\"\n\n start = datetime.datetime.now()\n\n # Code execution\n res = func(*arg)\n\n if _log_performance:\n usage = resource.getrusage(resource.RUSAGE_SELF)\n memory_process = (usage[2])/1000\n\n delta = datetime.datetime.now() - start\n delta_milliseconds = int(delta.total_seconds() * 1000)\n\n _logger.info(\"PERFORMANCE - {0} - milliseconds |{1:>8,}| - memory MB |{2:>8,}|\"\n .format(func.__name__,\n delta_milliseconds,\n memory_process))\n\n return res\n\n return wrapper", "def log(length, file):\n\n if user_init.check_pre_init() and user_utility.check_drive_init() == 'True':\n \n data = user_utility.read_log(length, file)\n\n for log in data:\n print(log)\n\n\n else:\n user_utility.print_error(\"Sink folder not Found! 
Initialise folder first or reset your configuration!\")", "def logprllwrite(self):\n sql = '''select to_char(time_waited, 'FM99999999999999990') retvalue \n from v$system_event se, v$event_name en where se.event(+) \n = en.name and en.name = 'log files parallel write' '''\n self.cur.execute(sql)\n res = self.cur.fetchall()\n for i in res:\n print(i[0])", "def init_log():\n log_op = LogOp()\n log_op.config_log(logging.DEBUG)", "def logManager(self):\n time.sleep(0.1)\n while True:\n try:\n time.sleep(0.2)\n data = self.logQ.get(block=False)\n except Queue.Empty:\n pass\n else:\n try:\n self.log_lock.acquire() \n self.log_file.logEntry(data)\n time.sleep(0.1)\n self.log_lock.release()\n except:\n print '*Unable to write to log file*'", "def startLogFiles():\n #global techLog\n global userl\n _generic_startLogFiles(True)", "def _load_disk(self):\r\n pass", "def log_free_disk_space():\n cmd = 'df -h'\n p = Popen(cmd, shell=True, stdout=PIPE)\n res = p.communicate()\n if res[0]:\n res = res[0]\n else:\n res = res[1]\n logger.warning('Disk usage statisticks:')\n logger.warning(res)", "def test02StoreRefresh(self):\n s = utils.FastStore(max_size=5)\n keys = []\n for i in range(0, 5):\n keys.append(s.Put(i, i))\n\n # This should not raise because keys[0] should be refreshed each time its\n # gotten\n for i in range(0, 1000):\n s.Get(keys[0])\n s.Put(i, i)", "def log_time(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n start = time.time()\n result = func(*args, **kwargs)\n end = time.time()\n f = open(\"log_dev.txt\",'a',encoding=\"utf8\")\n time_res = end - start\n f.write(\"\\n\"+func.__name__+ \" time = \" + str(time_res))\n return result\n\n return wrapper", "def checkLogs():\n run(\"cat /etc/chariot/logs\")#not sure if this is cat-able", "def store(**kwargs):\r\n stored = AppLog(**kwargs)\r\n DBSession.add(stored)", "def test_compute_persistence(perfectModelEnsemble_initialized_control):\n perfectModelEnsemble_initialized_control._compute_persistence(metric=\"acc\")", "def collect_logs(self, log_dir, label=None, min_t=100, max_t=200):\n pass", "def test_backup_with_update_on_disk_of_snapshot_markers(self):\n version = RestConnection(self.backupset.backup_host).get_nodes_version()\n if version[:5] == \"6.5.0\":\n self.log.info(\"\\n\\n******* Due to issue in MB-36904, \\\n \\nthis test will be skipped in 6.5.0 ********\\n\")\n return\n gen1 = BlobGenerator(\"ent-backup1\", \"ent-backup-\", self.value_size, end=100000)\n gen2 = BlobGenerator(\"ent-backup2\", \"ent-backup-\", self.value_size, end=100000)\n gen3 = BlobGenerator(\"ent-backup3\", \"ent-backup-\", self.value_size, end=100000)\n rest_conn = RestConnection(self.backupset.cluster_host)\n rest_conn.create_bucket(bucket=\"bucket0\", ramQuotaMB=1024)\n self.buckets = rest_conn.get_buckets()\n authentication = \"-u Administrator -p password\"\n\n self._load_all_buckets(self.master, gen1, \"create\", 0)\n self.log.info(\"Stop persistent\")\n cluster_nodes = rest_conn.get_nodes()\n clusters = copy.deepcopy(cluster_nodes)\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n for node in clusters:\n shell.execute_command(\"%scbepctl%s %s:11210 -b %s stop %s\" % \\\n (self.cli_command_location,\n self.cmd_ext,\n node.ip,\n \"bucket0\",\n authentication))\n shell.disconnect()\n self.log.info(\"Load 2nd batch docs\")\n self._load_all_buckets(self.master, gen2, \"create\", 0)\n self.log.info(\"Run full backup with cbbackupwrapper\")\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n backup_dir = 
self.tmp_path + \"backup\" + self.master.ip\n shell.execute_command(\"rm -rf %s\" % backup_dir)\n shell.execute_command(\"mkdir %s\" % backup_dir)\n shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m full %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n self.log.info(\"Load 3rd batch docs\")\n self._load_all_buckets(self.master, gen3, \"create\", 0)\n self.log.info(\"Run diff backup with cbbackupwrapper\")\n output, _ = shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m diff %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n\n if output and \"SUCCESSFULLY COMPLETED\" not in output[1]:\n self.fail(\"Failed to backup as the fix in MB-25727\")\n shell.disconnect()", "def test_errors_on_log(self):\n mb = self.maria_backup\n self.assertFalse(mb.errors_on_log()) # at the moment, xtrabackup execution does not generate disk logs", "def add_log(self, log):\n log = str(datetime.datetime.now()) + \": \"+log+\"\\n\"\n print(log)\n self.logs.append(log)\n if len(self.logs) > 10:\n self.append_to_logfile()", "def log_scalar(name, value, step, autolog):\n if not autolog:\n mlflow.log_metric(name, value)", "def recordLogsToList(log):\n print log\n# global LOGLIST\n LOGLIST.append(log)", "def on_L1(self):\r\n self.log()", "def on_L2(self):\r\n self.log()", "def log_all(self):\n self.save_raw()\n self.log()", "def logs_directory(self):", "def test_log_links(self):\n self.host = synthetic_host(\"myserver-with-nids\", [Nid.Nid(\"192.168.0.1\", \"tcp\", 0)])\n self.create_simple_filesystem(self.host)\n fake_log_message(\"192.168.0.1@tcp testfs-MDT0000\")\n response = self.api_client.get(\"/api/log/\")\n (event,) = self.deserialize(response)[\"objects\"]\n self.assertEqual(len(event[\"substitutions\"]), 2)\n self.host.state = \"removed\"\n self.host.save()\n self.mdt.not_deleted = False\n self.mdt.save()\n response = self.api_client.get(\"/api/log/\")\n (event,) = self.deserialize(response)[\"objects\"]\n self.assertEqual(len(event[\"substitutions\"]), 0)", "def log(string):\n\n print string\n\n# data and time\n dt = datetime.now().strftime(\"%b %d %H:%M:%S\")\n\n# check if log file exist / if not create it\n check_logf = os.path.isfile(logfile)\n if check_logf == False:\n os.system(\"touch %s\" % (logfile))\n firstlog = \"%s %s jadm: jadm log file was created!\" % (dt, os.uname()[1])\n os.system(\"echo '%s' > %s\" % (firstlog, logfile))\n\n# applay string to log file\n string = \"%s %s jadm: %s\" % (dt, os.uname()[1], string)\n os.system(\"echo '%s' >> %s\" % (string, logfile))", "def test_data_store_local_index(self, tcex: TcEx):\n tcex.api.tc.v2.datastore('local', self.data_type)", "def createLogicalVolume(self, vg, filesystem, name, mountpoint, size):\n lv = {}\n lv['command'] = 'create:logvol'\n lv['vg'] = vg\n lv['fs'] = filesystem\n lv['size'] = size - EXTENT_SIZE + 1\n lv['name'] = name\n lv['mountPoint'] = mountpoint\n lv['format'] = 'yes'\n\n return lv", "def disk_log_scheme():\r\n if LogOptions._DISK_LOG_SCHEME is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_SCHEME", "def test_cbbackupmgr_collect_logs(self):\n if \"5.5\" > self.cb_version[:3]:\n self.fail(\"This test is only for cb version 5.5 and later. 
\")\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster()\n self._collect_logs()", "def log_runtime(label, mean_time, std, instances):\n pass", "def do_disk_event(client, args):\n args.type = 'disk'\n do_event_show(client, args)", "def show_bigdl_info_logs(bigdl_type=\"float\"):\n callBigDlFunc(bigdl_type, \"showBigDlInfoLogs\")", "def logs(self, **options):\n cmd = self.get_scalingo_prefix_cmd()\n cmd += \" logs -f\"\n for name, val in options.items():\n if len(name) == 1:\n cmd += f\" -{name}\"\n else:\n cmd += f\" --{name}\"\n if val:\n cmd += f\" {val}\"\n if self.env_name == \"local\":\n raise ValueError(\"No logs on local environment\")\n else:\n asyncio.run(self.async_run(cmd))", "def store_data(batch_df: DataFrame, output_delta_lake_path):\n batch_df.select(col(\"MeterId\"),\n col(\"SupplierId\"),\n col(\"Measurement\"),\n col(\"ObservationTime\")) \\\n .repartition(\"SupplierId\") \\\n .write \\\n .partitionBy(\"SupplierId\") \\\n .format(\"delta\") \\\n .mode(\"append\") \\\n .save(output_delta_lake_path)", "def logentry(jobid, label, typ, content=None, path=None):\n ud = str(uuid.uuid4())\n db.logs.save({\"uuid\":ud, \"jobid\":jobid, \"label\":label, \"type\":typ, \"content\":content, \"date\":tstamp(), \"containerurl\":path})", "def dump_to_log(self):\n # self._send_request(\"/dumpToLog\")\n pass", "def load(lb_id, backend='memory'):\n return b_api.fetch(backend).logbook_get(lb_id)", "def store(status, message, **kwargs):\r\n LogRecord(status, message, **kwargs)", "def log_mounts_info(mounts):\n if isinstance(mounts, str):\n mounts = [mounts]\n\n g.log.info(\"Start logging mounts information:\")\n for mount_obj in mounts:\n g.log.info(\"Information of mount %s:%s\", mount_obj.client_system,\n mount_obj.mountpoint)\n # Mount Info\n g.log.info(\"Look For Mountpoint:\\n\")\n cmd = \"mount | grep %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Disk Space Usage\n g.log.info(\"Disk Space Usage Of Mountpoint:\\n\")\n cmd = \"df -h %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Long list the mountpoint\n g.log.info(\"List Mountpoint Entries:\\n\")\n cmd = \"ls -ld %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Stat mountpoint\n g.log.info(\"Mountpoint Status:\\n\")\n cmd = \"stat %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)", "def putlog(self,s):\n if not self.logqueue == None:\n# print s\n self.logqueue.put(\"Spectrum: (\"+time.ctime()+\"):\\n\"+s)", "def set_size_based_rotating_log(log_path=LOGGER_PATH, log_name=LOGGER_FILENAME):\n\n # Checks if the path exists otherwise tries to create it\n if not os.path.exists(log_path):\n try:\n os.makedirs(log_path)\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n print(exc_type, exc_value, exc_traceback)\n\n # Sets the format and level of logging records\n format = '%(asctime)s - %(levelname)s - [%(pathname)s:%(lineno)s - %(funcName)10s() ] - %(message)s'\n formatter = Formatter(format)\n level=logging.DEBUG\n\n # Does basic configuration for the logging system\n logging.basicConfig(format=format, level=level)\n\n # Instantiates a logger object\n logger = logging.getLogger(\"\")\n\n handler = logging.handlers.RotatingFileHandler(filename=log_path+'/'+log_name,\n maxBytes=LOGGER_MAX_SIZE,\n backupCount=LOGGER_MAX_FILES)\n 
handler.setFormatter(formatter)\n handler.rotator = rotator\n handler.namer = namer\n logger.addHandler(handler)\n return logger", "def test_logging_log_rotate_for_mysql(\n self,\n admin_node,\n k8s_actions,\n show_step,\n os_deployed):\n logger.info('Log rotate for mysql')\n log_path = '/var/log/ccp/mysql/'\n log_file = 'mysql.log'\n\n show_step(1)\n # get cron pod\n cron_pod = [pod for pod in k8s_actions.api.pods.list(\n namespace=ext.Namespace.BASE_NAMESPACE)\n if 'cron-' in pod.name][0]\n # clean files\n utils.rm_files(admin_node, cron_pod, log_path + log_file + '*')\n\n show_step(2)\n for day in range(0, 8):\n utils.create_file(admin_node, cron_pod, log_path + log_file, 110)\n utils.run_daily_cron(admin_node, cron_pod, 'logrotate')\n sleep(5)\n\n show_step(3)\n log_files = utils.list_files(\n admin_node, cron_pod, log_path, log_file + '*')\n assert len(log_files) == 7,\\\n \"Count of log files after rotation is wrong. \" \\\n \"Expected {} Actual {}\".format(log_files, 7)", "def log(cmdDict, kind, error=None):\n\n cmdDict['timestamp'] = str(int(time.time() * 1000))\n cmdDict['logType'] = kind\n cmdDict['_id'] = cmdDict['transactionNumber'] + cmdDict['user'] + cmdDict['command'] + cmdDict['timestamp'] + cmdDict['logType']\n\n if (error != None):\n cmdDict['errorMessage'] = error\n\n try:\n Database.insert(TRANSACT_COLLECT, cmdDict)\n\n except pymongo.errors.PyMongoError as err:\n print(f\"ERROR! Could not log command into database. Failed with: {err}\")", "def upload_logs():\n return {\n 'page': 'upload_logs',\n 'raw_logs': '',\n }", "def logMemoryStats():\n class MemoryStatusEx(ctypes.Structure):\n \"\"\" MEMORYSTATUSEX \"\"\"\n kaFields = [\n ( 'dwLength', ctypes.c_ulong ),\n ( 'dwMemoryLoad', ctypes.c_ulong ),\n ( 'ullTotalPhys', ctypes.c_ulonglong ),\n ( 'ullAvailPhys', ctypes.c_ulonglong ),\n ( 'ullTotalPageFile', ctypes.c_ulonglong ),\n ( 'ullAvailPageFile', ctypes.c_ulonglong ),\n ( 'ullTotalVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailExtendedVirtual', ctypes.c_ulonglong ),\n ];\n _fields_ = kaFields; # pylint: disable=invalid-name\n\n def __init__(self):\n super(MemoryStatusEx, self).__init__();\n self.dwLength = ctypes.sizeof(self);\n\n try:\n oStats = MemoryStatusEx();\n ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(oStats));\n except:\n reporter.logXcpt();\n return False;\n\n reporter.log('Memory statistics:');\n for sField, _ in MemoryStatusEx.kaFields:\n reporter.log(' %32s: %s' % (sField, getattr(oStats, sField)));\n return True;", "def get_dismount_mount_record(r, start_time):\n retv = {'state': 'M',\n 'node': r['node'],\n 'volume': r['volume'],\n 'type': r['type'],\n 'logname': r['logname'],\n 'start': start_time,\n 'finish': timeutil.wind_time(start_time, seconds=20, backward=False),\n 'storage_group': 'PROBE_UTILITY',\n 'reads': 0,\n 'writes':0\n }\n return retv", "def azure_fetch_and_push_to_influx(token, azure_data_store, file_path, tag_map, influx_settings):\n df = read_df_from_azure(azure_data_store, file_path, token)\n (host, port, user, password, db_name, batch_size) = influx_settings\n #df=df.resample('1Min').mean()\n write_to_influx(df, tag_map, host, port, user, password, db_name, batch_size)", "def test_data_store_local_new_index(tcex: TcEx):\n data = {'one': 1}\n key = str(uuid.uuid4())\n rid = 'one'\n\n ds = tcex.api.tc.v2.datastore('local', key)\n\n results = ds.add(rid=rid, data=data)\n if results is None:\n assert False, 'datastore add returned None'\n assert results.get('_id') == rid\n assert 
results.get('_shards', {}).get('successful') == 1", "def test_metering_database(self):\n self.with_deletion = self.input.param(\"delete\", False)\n self.db_name = \"%s-testmetering\" % self.db_name\n # validate initial throughput is 5000/3 = 1666\n for bucket in self.cluster.buckets:\n print(bucket.servers)\n self.assertEqual(self.bucket_util.get_throttle_limit(bucket),\n self.bucket_throttling_limit)\n\n # validate create, update, delete stat\n for op_type in [\"create\", \"update\"]:\n if op_type == \"create\":\n self.load_data(create_start=0, create_end=self.num_items, create_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if op_type == \"update\":\n self.load_data(update_start=0, update_end=self.num_items, update_perc=100, mutated=1)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if self.with_deletion:\n self.log.info(\"performing delete operation\")\n self.load_data(delete_start=0, delete_end=self.num_items, delete_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)", "def on_L3(self):\r\n self.log()", "def send_logs(self):\n for i in range(30):\n with open('{}-{}.log'.format(self._log_file_path, i), 'a') as log_file:\n for _ in range(self._log_rate):\n log_file.write(self._log_record + '\\n')", "def fetch_data_logger(fn):\n @functools.wraps(fn)\n def wrapper():\n\n data = fn()\n\n create_or_update_log(data, LOG_TYPES['D'])\n\n return data\n return wrapper", "def list_log_files(fs, devices, start_times, verbose=True, passwords={}):\n import canedge_browser\n\n log_files = []\n\n if len(start_times):\n for idx, device in enumerate(devices):\n start = start_times[idx]\n log_files_device = canedge_browser.get_log_files(fs, [device], start_date=start, passwords=passwords)\n log_files.extend(log_files_device)\n\n if verbose:\n print(f\"Found {len(log_files)} log files\\n\")\n\n return log_files", "def main():\n global tar_file_descr\n\n help_msg = 'Usage: log_collector.py <all | host1[,host2,host3...]>'\n hosts = []\n if len(sys.argv) == 2:\n if '-h' == sys.argv[1] or '--help' == sys.argv[1]:\n print(help_msg)\n sys.exit(0)\n elif 'all' == sys.argv[1]:\n # get logs from all hosts\n hosts = []\n host_objs = CLIENT.host_get_all()\n for host_obj in host_objs:\n hosts.append(host_obj.name)\n else:\n # get logs from specified hosts\n hostnames = sys.argv[1].split(',')\n for host in hostnames:\n if host not in hosts:\n hosts.append(host)\n else:\n print(help_msg)\n sys.exit(1)\n\n # open tar file for storing logs\n fd, tar_path = tempfile.mkstemp(prefix='kolla_support_logs_',\n suffix='.tgz')\n os.close(fd) # avoid fd leak\n\n with tarfile.open(tar_path, 'w:gz') as tar_file_descr:\n # clear out old logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n os.mkdir(LOGDIR)\n\n # gather logs from selected hosts\n try:\n for host in hosts:\n get_logs_from_host(host)\n\n # tar up all the container logs\n tar_file_descr.add(LOGDIR, arcname='container_logs')\n finally:\n # remove uncompressed logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n\n # gather dump output from kolla-cli\n dump_kolla_info()\n\n print('Log collection complete. 
Logs are at %s' % tar_path)", "def log_store(self) -> Optional[str]:\n return pulumi.get(self, \"log_store\")", "def __enable_log_streaming():\n # Enable BigQuery service for the project:\n run_command('gcloud services enable bigquery --project {}'.format(PROJECT_ID))\n\n # Create the BigQuery dataset to store the logs:\n # For details refer to https://cloud.google.com/bigquery/docs/datasets#bigquery-create-dataset-cli\n run_command('bq mk --data_location {} --description \\\"Cloud logging export.\\\" {}'\n .format(LOGS_LOCATION, LOGS_SINK_DATASET_ID), 'already exists')\n\n # Set up a log sink to the above-created BigQuery dataset:\n output_message = run_command('gcloud logging sinks create {} {} --project {} \\\n --log-filter=\\'resource.type=\\\"gcs_bucket\\\" OR resource.type=\\\"project\\\"\\''\n .format(LOGS_SINK_NAME, LOGS_SINK_DESTINATION, PROJECT_ID))\n\n # The service account that will be writing to BQ dataset is listed by the previous command.\n # After extracting the service account from the message, you need to give it BQ Writer role to that service account.\n __set_dataset_access( __find_service_account_in_message(output_message) )", "def get_storage_info(self, address: str, level: StorageLevel):", "def local(self, text):\n\t\tlogf = open(\"update_log.txt\", \"a\")\n\t\tdate = datetime.datetime.now()\n\t\tlogf.writelines(\"[\" + str(date) + \"] \" + text + \"\\n\")\n\t\tlogf.close()", "def log_it(logdata=None):\n with open(\"bloomhack.log\", \"a\") as fp:\n fp.write(logdata)\n return", "def node_storage(self, node):\n def listdir(path, only_dirs=False):\n ents = node.account.sftp_client.listdir(path)\n if not only_dirs:\n return ents\n paths = map(lambda fn: (fn, os.path.join(path, fn)), ents)\n return [p[0] for p in paths if node.account.isdir(p[1])]\n\n store = NodeStorage(RedpandaService.DATA_DIR)\n for ns in listdir(store.data_dir, True):\n if ns == '.coprocessor_offset_checkpoints':\n continue\n ns = store.add_namespace(ns, os.path.join(store.data_dir, ns))\n for topic in listdir(ns.path):\n topic = ns.add_topic(topic, os.path.join(ns.path, topic))\n for num in listdir(topic.path):\n partition = topic.add_partition(\n num, node, os.path.join(topic.path, num))\n partition.add_files(listdir(partition.path))\n return store", "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))", "def snapshot(snapshot_type, result_q, time_delta):", "def _log_process(ip_or_url,\n username,\n password,\n database,\n series_name,\n port=8086,\n polling_interval=1,\n retention_duration=MEASUREMENTS_RETENTION_DURATION,\n **tags):\n logger = _logger()\n logger.info('Logging GPU to Database {}'.format(ip_or_url))\n\n kwargs = {'series_name': series_name,\n 'polling_interval': polling_interval,\n 'retention_duration': retention_duration}\n kwargs.update(tags)\n p = Process(target=_start_logger_process,\n args=(ip_or_url,\n port,\n username,\n password,\n database),\n kwargs=kwargs)\n p.start()\n yield p\n p.terminate()\n p.join()", "def test_start_new_log(self):\n old_log_file_name = self.device.log_file_name\n 
self.device.start_new_log(log_name_prefix=self.get_log_suffix())\n self.assertNotEqual(\n old_log_file_name, self.device.log_file_name,\n f\"Expected log file name to change from {old_log_file_name}\")\n self.assertTrue(\n os.path.exists(old_log_file_name),\n f\"Expected old log file name {old_log_file_name} to exist\")\n self.assertTrue(\n os.path.exists(self.device.log_file_name),\n f\"Expected new log file name {self.device.log_file_name} to exist\")" ]
[ "0.6248957", "0.6004453", "0.53603053", "0.53381205", "0.5311462", "0.5291663", "0.5282635", "0.5279799", "0.52787983", "0.5260151", "0.52150124", "0.5205798", "0.5137791", "0.5104536", "0.5094829", "0.5094829", "0.5089645", "0.5080881", "0.5069531", "0.5067983", "0.5049315", "0.5047344", "0.50428", "0.50336874", "0.5028792", "0.5028792", "0.50232273", "0.5017229", "0.50001866", "0.4975056", "0.49695352", "0.49651346", "0.4959732", "0.49485096", "0.49201605", "0.491775", "0.4906063", "0.4901353", "0.48973086", "0.48958024", "0.48914644", "0.48892727", "0.48823112", "0.48804468", "0.4877903", "0.48676512", "0.48655146", "0.48638356", "0.48596066", "0.48569125", "0.48556426", "0.48419136", "0.48412684", "0.48380032", "0.48372665", "0.48348084", "0.48249245", "0.48086706", "0.4807052", "0.4804408", "0.48027894", "0.47964686", "0.47765866", "0.4768357", "0.475868", "0.47576597", "0.474328", "0.4735613", "0.47350103", "0.47318107", "0.47237858", "0.47210956", "0.47131303", "0.47130087", "0.47083464", "0.47016108", "0.47003374", "0.4697505", "0.46934873", "0.46901193", "0.4685906", "0.46849328", "0.46755195", "0.46703953", "0.46671495", "0.4667117", "0.46545303", "0.46534565", "0.46448874", "0.46371803", "0.46359396", "0.4634748", "0.4631148", "0.46283913", "0.4627094", "0.46101946", "0.45972055", "0.45956996", "0.45937267", "0.45925382", "0.45902616" ]
0.0
-1
This operation is applicable only to general-purpose local-disk instances and dedicated local-disk instances. You can call this operation up to 30 times per minute. To call this operation at a higher frequency, use a Logstore. For more information, see [Manage a Logstore](~~48990~~).
async def describe_slow_log_records_async( self, request: dds_20151201_models.DescribeSlowLogRecordsRequest, ) -> dds_20151201_models.DescribeSlowLogRecordsResponse: runtime = util_models.RuntimeOptions() return await self.describe_slow_log_records_with_options_async(request, runtime)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def monitorStore():\n # commented to use psutil system info system_info = systeminfo.get_all_info()\n\n system_info = nodeinfo.node_all()\n system_info ['monitored_timestamp'] = config.get_current_system_timestamp()\n\n # Attach sliver info to system info\n system_info.update(sliverinfo.collectAllDataAPI())\n\n s = shelve.open('log_shelf.db', writeback = True)\n\n while(1):\n try:\n try:\n if s.has_key('current_seq_number'):\n #Update current sequence number\n s['current_seq_number']+= 1\n current_seq = s['current_seq_number']\n else:\n current_seq = 1\n s['current_seq_number']= current_seq\n\n print(\"writing to file: \" + str(current_seq))\n\n # print(\"writing to file\" + str(system_info))\n s[str(current_seq)]= system_info\n\n\n finally:\n s.close()\n break\n\n except OSError:\n # In some research devices, the underlying dbm has a bug which needs to be handled explicitly\n print(\"Exception caught while handling shelve file!! OS Error: file not found. Trying again in 1 second\")\n time.sleep(1)\n continue\n\n delete_seen_entries()", "def log(msg):\n\n print('datastore: %s' % msg)", "def disable_disk_logging():\r\n app.set_option(_DISK_LOG_LEVEL_OPTION, LogOptions._LOG_LEVEL_NONE_KEY, force=True)", "def store_stream(data: DataStream):\n if data:\n try:\n c1 = datetime.now()\n CC.save_datastream(data,\"datastream\")\n e1 = datetime.now()\n CC.save_datastream_to_influxdb(data)\n i1 = datetime.now()\n print(\"Cassandra Time: \", e1-c1, \" Influx Time: \",i1-e1, \" Batch size: \",len(data.data))\n except:\n cc_log()", "def test_Drive_log_count(opencl_env: cldrive_env.OpenCLEnvironment):\n logs = opencl_kernel_driver.Drive(\n \"\"\"\nkernel void A(global int* a, global int* b) {\n a[get_global_id(0)] += b[get_global_id(0)];\n}\n\"\"\",\n 16,\n 16,\n opencl_env,\n 5,\n )\n assert len(logs) == 5", "def disk():\n run(env.disk_usage_command % env)", "def record_used(kind, hash):\n if os.path.exists(LOG_FILEPATH):\n log = open(os.path.join(ROOT, 'used'), 'a')\n else:\n log = open(os.path.join(ROOT, 'used'), 'w+')\n\n log.writelines([\"%s...%s\\n\" % (kind, hash)])", "def check_oplog(con, warning, critical, perf_data):\n warning = warning or 24 \n critical = critical or 4\n try:\n db = con.local\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) \n if (db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) != None) :\n oplog = \"oplog.rs\";\n else :\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"})\n if (db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"}) != None) :\n oplog = \"oplog.$main\";\n else :\n message = \"neither master/slave nor replica set replication detected\";\n return check_levels(None,warning,critical,message)\n\n try:\n set_read_preference(con.admin)\n data=con.local.command(pymongo.son_manipulator.SON([('collstats',oplog)]))\n except:\n data = con.admin.command(son.SON([('collstats',oplog)]))\n\n ol_size=data['size']\n ol_storage_size=data['storageSize']\n ol_used_storage=int(float(ol_size)/ol_storage_size*100+1)\n ol=con.local[oplog]\n firstc = ol.find().sort(\"$natural\",pymongo.ASCENDING).limit(1)[0]['ts']\n lastc = ol.find().sort(\"$natural\",pymongo.DESCENDING).limit(1)[0]['ts']\n time_in_oplog= (lastc.as_datetime()-firstc.as_datetime())\n message=\"Oplog saves \"+ str(time_in_oplog) + \" %d%% used\" %ol_used_storage \n try: #work starting from python2.7\n hours_in_oplog= time_in_oplog.total_seconds()/60/60\n except:\n hours_in_oplog= float(time_in_oplog.seconds + time_in_oplog.days * 24 * 3600)/60/60\n 
approx_level=hours_in_oplog*100/ol_used_storage\n message+=performance_data(perf_data,[(\"%.2f\" % hours_in_oplog,'oplog_time',warning,critical),(\"%.2f \" % approx_level, 'oplog_time_100_percent_used')])\n return check_levels(-approx_level,-warning,-critical,message)\n\n except Exception, e:\n return exit_with_general_critical(e)", "def syslog_local_file(handle, admin_state, name, severity=\"emergencies\",\n size=\"40000\"):\n\n from ucsmsdk.mometa.comm.CommSyslogFile import CommSyslogFile\n\n mo = CommSyslogFile(parent_mo_or_dn=\"sys/svc-ext/syslog\", size=size,\n admin_state=admin_state,\n name=name,\n severity=severity)\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n return mo", "def disk_log_level():\r\n if LogOptions._DISK_LOG_LEVEL is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_LEVEL", "def get_full_log(self):\n return self._get_log('full')", "def _record_storage_event(metric, value=0):\n command_name = properties.VALUES.metrics.command_name.Get()\n metrics.CustomKeyValue(command_name, 'Storage-' + metric, value)", "def get_logdb_quota():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<show><system><logdb-quota></logdb-quota></system></show>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def test_memory_pressure(self):\n self.execute_query(self.query)\n # This triggers a full GC as of openjdk 1.8.\n call([\"jmap\", \"-histo:live\", str(self.cluster.catalogd.get_pid())])\n # Sleep for logbufsecs=5 seconds to wait for the log to be flushed. Wait 5 more\n # seconds to reduce flakiness.\n time.sleep(10)\n assert self.metadata_cache_string not in self._get_catalog_object()", "def getLogs():", "def getLogs():", "def InsertLog():", "def GetLogs(self):\n raise NotImplementedError()", "def logmem(cls, tag):\n\n cls.logger.info(\"----%s----\", str(tag))\n mem = psutil.virtual_memory()\n cls.logger.info(\"total:%s M\", mem.total / 1024 / 1024)\n cls.logger.info(\"available:%s M\", mem.available / 1024 / 1024)\n cls.logger.info(\"used:%s M\", mem.used / 1024 / 1024)\n cls.logger.info(\"free:%s M\", mem.free / 1024 / 1024)\n cls.logger.info(\"active:%s M\", mem.active / 1024 / 1024)\n cls.logger.info(\"inactive:%s M\", mem.inactive / 1024 / 1024)\n cls.logger.info(\"buffers:%s M\", mem.buffers / 1024 / 1024)\n cls.logger.info(\"cached:%s M\", mem.cached / 1024 / 1024)\n cls.logger.info(\"shared:%s M\", mem.shared / 1024 / 1024)", "def upload_crashes(self, name, directory):\n logging.info('Not uploading crashes because no Filestore.')", "def logs(lines=50):\n run(f'journalctl --lines {lines} --unit addok --follow')", "def log(self, message):\n try:\n stat = os.stat(self.logpath)\n if stat.st_size >= 1000000:\n os.rename(self.logpath, self.logpath + '.1')\n except:\n pass\n logfile = open(self.logpath, 'a+')\n logfile.write(message + \"\\n\")\n logfile.close()", "def logs(name, namespace, region, start_time, end_time, offset, failed, tail):\n\n start, end = _align_time(start_time, end_time, offset, tail)\n client = ScfLogClient(name, namespace, region, failed)\n client.fetch_log(start, end, tail)", "def objectLogger(params):\n mode = params['mode']\n maxlen = params['maxlen']\n file_format = params['format']\n home_dir = params['storage_dir']\n server = Listener((params['ip'], int(params['port'])))\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n while True:\n conn = server.accept()\n while True:\n try:\n data = conn.recv()\n except 
EOFError:\n break\n if data:\n storage.append(data)\n else:\n storage.sync(id_generator())\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n conn.close()", "def _load_disk(self):", "def _load_disk(self):", "def redire_spark_logs(bigdl_type=\"float\", log_path=os.getcwd() + \"/bigdl.log\"):\n callBigDlFunc(bigdl_type, \"redirectSparkLogs\", log_path)", "def get_device_log(self, client):\r\n file_path = client.getDeviceLog()\r\n logging.info(str(time.asctime(time.localtime())) + \" Device Logs collected !! \" + file_path)\r\n #self.logger.info(\"Device logs collected at %s\" % file_path)\r\n return file_path", "def logs(self, container: Container) -> str:", "def log_store(self) -> str:\n return pulumi.get(self, \"log_store\")", "def on_sync(self):\r\n self.log()", "def collect_storage_metrics(sys):\n try:\n session = get_session()\n client = InfluxDBClient(host=INFLUXDB_HOSTNAME, port=INFLUXDB_PORT, database=INFLUXDB_DATABASE)\n\n sys_id = sys[\"id\"]\n sys_name = get_system_name(sys)\n\n json_body = list()\n\n # Get Drive statistics\n drive_stats_list = session.get((\"{}/{}/analysed-drive-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n drive_locations = get_drive_location(sys_id, session)\n if CMD.showDriveNames:\n for stats in drive_stats_list:\n location_send = drive_locations.get(stats[\"diskId\"])\n LOG.info((\"Tray{:02.0f}, Slot{:03.0f}\").format(location_send[0], location_send[1]))\n # Add Drive statistics to json body\n for stats in drive_stats_list:\n disk_location_info = drive_locations.get(stats[\"diskId\"])\n disk_item = dict(\n measurement = \"disks\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n sys_tray = (\"{:02.0f}\").format(disk_location_info[0]),\n sys_tray_slot = (\"{:03.0f}\").format(disk_location_info[1])\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in DRIVE_PARAMS\n )\n )\n if CMD.showDriveMetrics:\n LOG.info(\"Drive payload: %s\", disk_item)\n json_body.append(disk_item)\n\n # Get interface statistics\n interface_stats_list = session.get((\"{}/{}/analysed-interface-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showInterfaceNames:\n for stats in interface_stats_list:\n LOG.info(stats[\"interfaceId\"])\n # Add interface statistics to json body\n for stats in interface_stats_list:\n if_item = dict(\n measurement = \"interface\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n interface_id = stats[\"interfaceId\"],\n channel_type = stats[\"channelType\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in INTERFACE_PARAMS\n )\n )\n if CMD.showInterfaceMetrics:\n LOG.info(\"Interface payload: %s\", if_item)\n json_body.append(if_item)\n\n # Get System statistics\n system_stats_list = session.get((\"{}/{}/analysed-system-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n # Add System statistics to json body\n sys_item = dict(\n measurement = \"systems\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name\n ),\n fields = dict(\n (metric, system_stats_list.get(metric)) for metric in SYSTEM_PARAMS\n )\n )\n if CMD.showSystemMetrics:\n LOG.info(\"System payload: %s\", sys_item)\n json_body.append(sys_item)\n \n # Get Volume statistics\n volume_stats_list = session.get((\"{}/{}/analysed-volume-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showVolumeNames:\n for stats in volume_stats_list:\n LOG.info(stats[\"volumeName\"])\n # Add Volume statistics to json body\n for stats in volume_stats_list:\n 
vol_item = dict(\n measurement = \"volumes\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n vol_name = stats[\"volumeName\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in VOLUME_PARAMS\n )\n )\n if CMD.showVolumeMetrics:\n LOG.info(\"Volume payload: %s\", vol_item)\n json_body.append(vol_item)\n\n if not CMD.doNotPost:\n client.write_points(json_body, database=INFLUXDB_DATABASE, time_precision=\"s\")\n\n except RuntimeError:\n LOG.error((\"Error when attempting to post statistics for {}/{}\").format(sys[\"name\"], sys[\"id\"]))", "def reset_used():\n with open(LOG_FILEPATH, 'w+') as logfile:\n pass", "def docker_logs():\n local('docker logs {} -f'.format(project_name))", "def test_slf_notastring():\n oldlogfile = get_logfile()\n start_logfile(1.0)\n start_logfile(True)\n set_logfile(oldlogfile)", "def set_disk_log_level(log_level):\r\n LogOptions._DISK_LOG_SCHEME, LogOptions._DISK_LOG_LEVEL = (\r\n LogOptions._parse_loglevel(log_level, scheme='google'))", "def test_slf_basic():\n oldlogfile = get_logfile()\n with tempfile.TemporaryDirectory() as tmp:\n logfile = os.path.join(tmp, 'log.txt')\n assert ~os.path.exists(logfile)\n start_logfile(logfile)\n assert os.path.exists(logfile)\n set_logfile(oldlogfile)", "def _performance_log(func):\n\n def wrapper(*arg):\n \"\"\" wrapper \"\"\"\n\n start = datetime.datetime.now()\n\n # Code execution\n res = func(*arg)\n\n if _log_performance:\n usage = resource.getrusage(resource.RUSAGE_SELF)\n memory_process = (usage[2])/1000\n\n delta = datetime.datetime.now() - start\n delta_milliseconds = int(delta.total_seconds() * 1000)\n\n _logger.info(\"PERFORMANCE - {0} - milliseconds |{1:>8,}| - memory MB |{2:>8,}|\"\n .format(func.__name__,\n delta_milliseconds,\n memory_process))\n\n return res\n\n return wrapper", "def log(length, file):\n\n if user_init.check_pre_init() and user_utility.check_drive_init() == 'True':\n \n data = user_utility.read_log(length, file)\n\n for log in data:\n print(log)\n\n\n else:\n user_utility.print_error(\"Sink folder not Found! 
Initialise folder first or reset your configuration!\")", "def logprllwrite(self):\n sql = '''select to_char(time_waited, 'FM99999999999999990') retvalue \n from v$system_event se, v$event_name en where se.event(+) \n = en.name and en.name = 'log files parallel write' '''\n self.cur.execute(sql)\n res = self.cur.fetchall()\n for i in res:\n print(i[0])", "def init_log():\n log_op = LogOp()\n log_op.config_log(logging.DEBUG)", "def logManager(self):\n time.sleep(0.1)\n while True:\n try:\n time.sleep(0.2)\n data = self.logQ.get(block=False)\n except Queue.Empty:\n pass\n else:\n try:\n self.log_lock.acquire() \n self.log_file.logEntry(data)\n time.sleep(0.1)\n self.log_lock.release()\n except:\n print '*Unable to write to log file*'", "def startLogFiles():\n #global techLog\n global userl\n _generic_startLogFiles(True)", "def _load_disk(self):\r\n pass", "def log_free_disk_space():\n cmd = 'df -h'\n p = Popen(cmd, shell=True, stdout=PIPE)\n res = p.communicate()\n if res[0]:\n res = res[0]\n else:\n res = res[1]\n logger.warning('Disk usage statisticks:')\n logger.warning(res)", "def test02StoreRefresh(self):\n s = utils.FastStore(max_size=5)\n keys = []\n for i in range(0, 5):\n keys.append(s.Put(i, i))\n\n # This should not raise because keys[0] should be refreshed each time its\n # gotten\n for i in range(0, 1000):\n s.Get(keys[0])\n s.Put(i, i)", "def log_time(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n start = time.time()\n result = func(*args, **kwargs)\n end = time.time()\n f = open(\"log_dev.txt\",'a',encoding=\"utf8\")\n time_res = end - start\n f.write(\"\\n\"+func.__name__+ \" time = \" + str(time_res))\n return result\n\n return wrapper", "def checkLogs():\n run(\"cat /etc/chariot/logs\")#not sure if this is cat-able", "def store(**kwargs):\r\n stored = AppLog(**kwargs)\r\n DBSession.add(stored)", "def test_compute_persistence(perfectModelEnsemble_initialized_control):\n perfectModelEnsemble_initialized_control._compute_persistence(metric=\"acc\")", "def collect_logs(self, log_dir, label=None, min_t=100, max_t=200):\n pass", "def test_backup_with_update_on_disk_of_snapshot_markers(self):\n version = RestConnection(self.backupset.backup_host).get_nodes_version()\n if version[:5] == \"6.5.0\":\n self.log.info(\"\\n\\n******* Due to issue in MB-36904, \\\n \\nthis test will be skipped in 6.5.0 ********\\n\")\n return\n gen1 = BlobGenerator(\"ent-backup1\", \"ent-backup-\", self.value_size, end=100000)\n gen2 = BlobGenerator(\"ent-backup2\", \"ent-backup-\", self.value_size, end=100000)\n gen3 = BlobGenerator(\"ent-backup3\", \"ent-backup-\", self.value_size, end=100000)\n rest_conn = RestConnection(self.backupset.cluster_host)\n rest_conn.create_bucket(bucket=\"bucket0\", ramQuotaMB=1024)\n self.buckets = rest_conn.get_buckets()\n authentication = \"-u Administrator -p password\"\n\n self._load_all_buckets(self.master, gen1, \"create\", 0)\n self.log.info(\"Stop persistent\")\n cluster_nodes = rest_conn.get_nodes()\n clusters = copy.deepcopy(cluster_nodes)\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n for node in clusters:\n shell.execute_command(\"%scbepctl%s %s:11210 -b %s stop %s\" % \\\n (self.cli_command_location,\n self.cmd_ext,\n node.ip,\n \"bucket0\",\n authentication))\n shell.disconnect()\n self.log.info(\"Load 2nd batch docs\")\n self._load_all_buckets(self.master, gen2, \"create\", 0)\n self.log.info(\"Run full backup with cbbackupwrapper\")\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n backup_dir = 
self.tmp_path + \"backup\" + self.master.ip\n shell.execute_command(\"rm -rf %s\" % backup_dir)\n shell.execute_command(\"mkdir %s\" % backup_dir)\n shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m full %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n self.log.info(\"Load 3rd batch docs\")\n self._load_all_buckets(self.master, gen3, \"create\", 0)\n self.log.info(\"Run diff backup with cbbackupwrapper\")\n output, _ = shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m diff %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n\n if output and \"SUCCESSFULLY COMPLETED\" not in output[1]:\n self.fail(\"Failed to backup as the fix in MB-25727\")\n shell.disconnect()", "def test_errors_on_log(self):\n mb = self.maria_backup\n self.assertFalse(mb.errors_on_log()) # at the moment, xtrabackup execution does not generate disk logs", "def add_log(self, log):\n log = str(datetime.datetime.now()) + \": \"+log+\"\\n\"\n print(log)\n self.logs.append(log)\n if len(self.logs) > 10:\n self.append_to_logfile()", "def log_scalar(name, value, step, autolog):\n if not autolog:\n mlflow.log_metric(name, value)", "def recordLogsToList(log):\n print log\n# global LOGLIST\n LOGLIST.append(log)", "def on_L1(self):\r\n self.log()", "def on_L2(self):\r\n self.log()", "def log_all(self):\n self.save_raw()\n self.log()", "def logs_directory(self):", "def test_log_links(self):\n self.host = synthetic_host(\"myserver-with-nids\", [Nid.Nid(\"192.168.0.1\", \"tcp\", 0)])\n self.create_simple_filesystem(self.host)\n fake_log_message(\"192.168.0.1@tcp testfs-MDT0000\")\n response = self.api_client.get(\"/api/log/\")\n (event,) = self.deserialize(response)[\"objects\"]\n self.assertEqual(len(event[\"substitutions\"]), 2)\n self.host.state = \"removed\"\n self.host.save()\n self.mdt.not_deleted = False\n self.mdt.save()\n response = self.api_client.get(\"/api/log/\")\n (event,) = self.deserialize(response)[\"objects\"]\n self.assertEqual(len(event[\"substitutions\"]), 0)", "def log(string):\n\n print string\n\n# data and time\n dt = datetime.now().strftime(\"%b %d %H:%M:%S\")\n\n# check if log file exist / if not create it\n check_logf = os.path.isfile(logfile)\n if check_logf == False:\n os.system(\"touch %s\" % (logfile))\n firstlog = \"%s %s jadm: jadm log file was created!\" % (dt, os.uname()[1])\n os.system(\"echo '%s' > %s\" % (firstlog, logfile))\n\n# applay string to log file\n string = \"%s %s jadm: %s\" % (dt, os.uname()[1], string)\n os.system(\"echo '%s' >> %s\" % (string, logfile))", "def test_data_store_local_index(self, tcex: TcEx):\n tcex.api.tc.v2.datastore('local', self.data_type)", "def createLogicalVolume(self, vg, filesystem, name, mountpoint, size):\n lv = {}\n lv['command'] = 'create:logvol'\n lv['vg'] = vg\n lv['fs'] = filesystem\n lv['size'] = size - EXTENT_SIZE + 1\n lv['name'] = name\n lv['mountPoint'] = mountpoint\n lv['format'] = 'yes'\n\n return lv", "def disk_log_scheme():\r\n if LogOptions._DISK_LOG_SCHEME is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_SCHEME", "def test_cbbackupmgr_collect_logs(self):\n if \"5.5\" > self.cb_version[:3]:\n self.fail(\"This test is only for cb version 5.5 and later. 
\")\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster()\n self._collect_logs()", "def log_runtime(label, mean_time, std, instances):\n pass", "def do_disk_event(client, args):\n args.type = 'disk'\n do_event_show(client, args)", "def show_bigdl_info_logs(bigdl_type=\"float\"):\n callBigDlFunc(bigdl_type, \"showBigDlInfoLogs\")", "def logs(self, **options):\n cmd = self.get_scalingo_prefix_cmd()\n cmd += \" logs -f\"\n for name, val in options.items():\n if len(name) == 1:\n cmd += f\" -{name}\"\n else:\n cmd += f\" --{name}\"\n if val:\n cmd += f\" {val}\"\n if self.env_name == \"local\":\n raise ValueError(\"No logs on local environment\")\n else:\n asyncio.run(self.async_run(cmd))", "def store_data(batch_df: DataFrame, output_delta_lake_path):\n batch_df.select(col(\"MeterId\"),\n col(\"SupplierId\"),\n col(\"Measurement\"),\n col(\"ObservationTime\")) \\\n .repartition(\"SupplierId\") \\\n .write \\\n .partitionBy(\"SupplierId\") \\\n .format(\"delta\") \\\n .mode(\"append\") \\\n .save(output_delta_lake_path)", "def logentry(jobid, label, typ, content=None, path=None):\n ud = str(uuid.uuid4())\n db.logs.save({\"uuid\":ud, \"jobid\":jobid, \"label\":label, \"type\":typ, \"content\":content, \"date\":tstamp(), \"containerurl\":path})", "def load(lb_id, backend='memory'):\n return b_api.fetch(backend).logbook_get(lb_id)", "def dump_to_log(self):\n # self._send_request(\"/dumpToLog\")\n pass", "def store(status, message, **kwargs):\r\n LogRecord(status, message, **kwargs)", "def log_mounts_info(mounts):\n if isinstance(mounts, str):\n mounts = [mounts]\n\n g.log.info(\"Start logging mounts information:\")\n for mount_obj in mounts:\n g.log.info(\"Information of mount %s:%s\", mount_obj.client_system,\n mount_obj.mountpoint)\n # Mount Info\n g.log.info(\"Look For Mountpoint:\\n\")\n cmd = \"mount | grep %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Disk Space Usage\n g.log.info(\"Disk Space Usage Of Mountpoint:\\n\")\n cmd = \"df -h %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Long list the mountpoint\n g.log.info(\"List Mountpoint Entries:\\n\")\n cmd = \"ls -ld %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Stat mountpoint\n g.log.info(\"Mountpoint Status:\\n\")\n cmd = \"stat %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)", "def putlog(self,s):\n if not self.logqueue == None:\n# print s\n self.logqueue.put(\"Spectrum: (\"+time.ctime()+\"):\\n\"+s)", "def set_size_based_rotating_log(log_path=LOGGER_PATH, log_name=LOGGER_FILENAME):\n\n # Checks if the path exists otherwise tries to create it\n if not os.path.exists(log_path):\n try:\n os.makedirs(log_path)\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n print(exc_type, exc_value, exc_traceback)\n\n # Sets the format and level of logging records\n format = '%(asctime)s - %(levelname)s - [%(pathname)s:%(lineno)s - %(funcName)10s() ] - %(message)s'\n formatter = Formatter(format)\n level=logging.DEBUG\n\n # Does basic configuration for the logging system\n logging.basicConfig(format=format, level=level)\n\n # Instantiates a logger object\n logger = logging.getLogger(\"\")\n\n handler = logging.handlers.RotatingFileHandler(filename=log_path+'/'+log_name,\n maxBytes=LOGGER_MAX_SIZE,\n backupCount=LOGGER_MAX_FILES)\n 
handler.setFormatter(formatter)\n handler.rotator = rotator\n handler.namer = namer\n logger.addHandler(handler)\n return logger", "def test_logging_log_rotate_for_mysql(\n self,\n admin_node,\n k8s_actions,\n show_step,\n os_deployed):\n logger.info('Log rotate for mysql')\n log_path = '/var/log/ccp/mysql/'\n log_file = 'mysql.log'\n\n show_step(1)\n # get cron pod\n cron_pod = [pod for pod in k8s_actions.api.pods.list(\n namespace=ext.Namespace.BASE_NAMESPACE)\n if 'cron-' in pod.name][0]\n # clean files\n utils.rm_files(admin_node, cron_pod, log_path + log_file + '*')\n\n show_step(2)\n for day in range(0, 8):\n utils.create_file(admin_node, cron_pod, log_path + log_file, 110)\n utils.run_daily_cron(admin_node, cron_pod, 'logrotate')\n sleep(5)\n\n show_step(3)\n log_files = utils.list_files(\n admin_node, cron_pod, log_path, log_file + '*')\n assert len(log_files) == 7,\\\n \"Count of log files after rotation is wrong. \" \\\n \"Expected {} Actual {}\".format(log_files, 7)", "def log(cmdDict, kind, error=None):\n\n cmdDict['timestamp'] = str(int(time.time() * 1000))\n cmdDict['logType'] = kind\n cmdDict['_id'] = cmdDict['transactionNumber'] + cmdDict['user'] + cmdDict['command'] + cmdDict['timestamp'] + cmdDict['logType']\n\n if (error != None):\n cmdDict['errorMessage'] = error\n\n try:\n Database.insert(TRANSACT_COLLECT, cmdDict)\n\n except pymongo.errors.PyMongoError as err:\n print(f\"ERROR! Could not log command into database. Failed with: {err}\")", "def upload_logs():\n return {\n 'page': 'upload_logs',\n 'raw_logs': '',\n }", "def logMemoryStats():\n class MemoryStatusEx(ctypes.Structure):\n \"\"\" MEMORYSTATUSEX \"\"\"\n kaFields = [\n ( 'dwLength', ctypes.c_ulong ),\n ( 'dwMemoryLoad', ctypes.c_ulong ),\n ( 'ullTotalPhys', ctypes.c_ulonglong ),\n ( 'ullAvailPhys', ctypes.c_ulonglong ),\n ( 'ullTotalPageFile', ctypes.c_ulonglong ),\n ( 'ullAvailPageFile', ctypes.c_ulonglong ),\n ( 'ullTotalVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailExtendedVirtual', ctypes.c_ulonglong ),\n ];\n _fields_ = kaFields; # pylint: disable=invalid-name\n\n def __init__(self):\n super(MemoryStatusEx, self).__init__();\n self.dwLength = ctypes.sizeof(self);\n\n try:\n oStats = MemoryStatusEx();\n ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(oStats));\n except:\n reporter.logXcpt();\n return False;\n\n reporter.log('Memory statistics:');\n for sField, _ in MemoryStatusEx.kaFields:\n reporter.log(' %32s: %s' % (sField, getattr(oStats, sField)));\n return True;", "def get_dismount_mount_record(r, start_time):\n retv = {'state': 'M',\n 'node': r['node'],\n 'volume': r['volume'],\n 'type': r['type'],\n 'logname': r['logname'],\n 'start': start_time,\n 'finish': timeutil.wind_time(start_time, seconds=20, backward=False),\n 'storage_group': 'PROBE_UTILITY',\n 'reads': 0,\n 'writes':0\n }\n return retv", "def azure_fetch_and_push_to_influx(token, azure_data_store, file_path, tag_map, influx_settings):\n df = read_df_from_azure(azure_data_store, file_path, token)\n (host, port, user, password, db_name, batch_size) = influx_settings\n #df=df.resample('1Min').mean()\n write_to_influx(df, tag_map, host, port, user, password, db_name, batch_size)", "def test_metering_database(self):\n self.with_deletion = self.input.param(\"delete\", False)\n self.db_name = \"%s-testmetering\" % self.db_name\n # validate initial throughput is 5000/3 = 1666\n for bucket in self.cluster.buckets:\n print(bucket.servers)\n 
self.assertEqual(self.bucket_util.get_throttle_limit(bucket),\n self.bucket_throttling_limit)\n\n # validate create, update, delete stat\n for op_type in [\"create\", \"update\"]:\n if op_type == \"create\":\n self.load_data(create_start=0, create_end=self.num_items, create_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if op_type == \"update\":\n self.load_data(update_start=0, update_end=self.num_items, update_perc=100, mutated=1)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if self.with_deletion:\n self.log.info(\"performing delete operation\")\n self.load_data(delete_start=0, delete_end=self.num_items, delete_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)", "def test_data_store_local_new_index(tcex: TcEx):\n data = {'one': 1}\n key = str(uuid.uuid4())\n rid = 'one'\n\n ds = tcex.api.tc.v2.datastore('local', key)\n\n results = ds.add(rid=rid, data=data)\n if results is None:\n assert False, 'datastore add returned None'\n assert results.get('_id') == rid\n assert results.get('_shards', {}).get('successful') == 1", "def send_logs(self):\n for i in range(30):\n with open('{}-{}.log'.format(self._log_file_path, i), 'a') as log_file:\n for _ in range(self._log_rate):\n log_file.write(self._log_record + '\\n')", "def on_L3(self):\r\n self.log()", "def fetch_data_logger(fn):\n @functools.wraps(fn)\n def wrapper():\n\n data = fn()\n\n create_or_update_log(data, LOG_TYPES['D'])\n\n return data\n return wrapper", "def list_log_files(fs, devices, start_times, verbose=True, passwords={}):\n import canedge_browser\n\n log_files = []\n\n if len(start_times):\n for idx, device in enumerate(devices):\n start = start_times[idx]\n log_files_device = canedge_browser.get_log_files(fs, [device], start_date=start, passwords=passwords)\n log_files.extend(log_files_device)\n\n if verbose:\n print(f\"Found {len(log_files)} log files\\n\")\n\n return log_files", "def main():\n global tar_file_descr\n\n help_msg = 'Usage: log_collector.py <all | host1[,host2,host3...]>'\n hosts = []\n if len(sys.argv) == 2:\n if '-h' == sys.argv[1] or '--help' == sys.argv[1]:\n print(help_msg)\n sys.exit(0)\n elif 'all' == sys.argv[1]:\n # get logs from all hosts\n hosts = []\n host_objs = CLIENT.host_get_all()\n for host_obj in host_objs:\n hosts.append(host_obj.name)\n else:\n # get logs from specified hosts\n hostnames = sys.argv[1].split(',')\n for host in hostnames:\n if host not in hosts:\n hosts.append(host)\n else:\n print(help_msg)\n sys.exit(1)\n\n # open tar file for storing logs\n fd, tar_path = tempfile.mkstemp(prefix='kolla_support_logs_',\n suffix='.tgz')\n os.close(fd) # avoid fd leak\n\n with tarfile.open(tar_path, 'w:gz') as tar_file_descr:\n # clear out old logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n os.mkdir(LOGDIR)\n\n # gather logs from selected hosts\n try:\n for host in hosts:\n get_logs_from_host(host)\n\n # tar up all the container logs\n tar_file_descr.add(LOGDIR, arcname='container_logs')\n finally:\n # remove uncompressed logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n\n # gather dump output from kolla-cli\n dump_kolla_info()\n\n print('Log collection complete. 
Logs are at %s' % tar_path)", "def log_store(self) -> Optional[str]:\n return pulumi.get(self, \"log_store\")", "def __enable_log_streaming():\n # Enable BigQuery service for the project:\n run_command('gcloud services enable bigquery --project {}'.format(PROJECT_ID))\n\n # Create the BigQuery dataset to store the logs:\n # For details refer to https://cloud.google.com/bigquery/docs/datasets#bigquery-create-dataset-cli\n run_command('bq mk --data_location {} --description \\\"Cloud logging export.\\\" {}'\n .format(LOGS_LOCATION, LOGS_SINK_DATASET_ID), 'already exists')\n\n # Set up a log sink to the above-created BigQuery dataset:\n output_message = run_command('gcloud logging sinks create {} {} --project {} \\\n --log-filter=\\'resource.type=\\\"gcs_bucket\\\" OR resource.type=\\\"project\\\"\\''\n .format(LOGS_SINK_NAME, LOGS_SINK_DESTINATION, PROJECT_ID))\n\n # The service account that will be writing to BQ dataset is listed by the previous command.\n # After extracting the service account from the message, you need to give it BQ Writer role to that service account.\n __set_dataset_access( __find_service_account_in_message(output_message) )", "def get_storage_info(self, address: str, level: StorageLevel):", "def local(self, text):\n\t\tlogf = open(\"update_log.txt\", \"a\")\n\t\tdate = datetime.datetime.now()\n\t\tlogf.writelines(\"[\" + str(date) + \"] \" + text + \"\\n\")\n\t\tlogf.close()", "def log_it(logdata=None):\n with open(\"bloomhack.log\", \"a\") as fp:\n fp.write(logdata)\n return", "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))", "def node_storage(self, node):\n def listdir(path, only_dirs=False):\n ents = node.account.sftp_client.listdir(path)\n if not only_dirs:\n return ents\n paths = map(lambda fn: (fn, os.path.join(path, fn)), ents)\n return [p[0] for p in paths if node.account.isdir(p[1])]\n\n store = NodeStorage(RedpandaService.DATA_DIR)\n for ns in listdir(store.data_dir, True):\n if ns == '.coprocessor_offset_checkpoints':\n continue\n ns = store.add_namespace(ns, os.path.join(store.data_dir, ns))\n for topic in listdir(ns.path):\n topic = ns.add_topic(topic, os.path.join(ns.path, topic))\n for num in listdir(topic.path):\n partition = topic.add_partition(\n num, node, os.path.join(topic.path, num))\n partition.add_files(listdir(partition.path))\n return store", "def snapshot(snapshot_type, result_q, time_delta):", "def _log_process(ip_or_url,\n username,\n password,\n database,\n series_name,\n port=8086,\n polling_interval=1,\n retention_duration=MEASUREMENTS_RETENTION_DURATION,\n **tags):\n logger = _logger()\n logger.info('Logging GPU to Database {}'.format(ip_or_url))\n\n kwargs = {'series_name': series_name,\n 'polling_interval': polling_interval,\n 'retention_duration': retention_duration}\n kwargs.update(tags)\n p = Process(target=_start_logger_process,\n args=(ip_or_url,\n port,\n username,\n password,\n database),\n kwargs=kwargs)\n p.start()\n yield p\n p.terminate()\n p.join()", "def test_start_new_log(self):\n old_log_file_name = self.device.log_file_name\n 
self.device.start_new_log(log_name_prefix=self.get_log_suffix())\n self.assertNotEqual(\n old_log_file_name, self.device.log_file_name,\n f\"Expected log file name to change from {old_log_file_name}\")\n self.assertTrue(\n os.path.exists(old_log_file_name),\n f\"Expected old log file name {old_log_file_name} to exist\")\n self.assertTrue(\n os.path.exists(self.device.log_file_name),\n f\"Expected new log file name {self.device.log_file_name} to exist\")" ]
[ "0.62486845", "0.60031223", "0.53603905", "0.53379613", "0.5311335", "0.52926815", "0.5282698", "0.52791435", "0.5279122", "0.525997", "0.52146244", "0.52052414", "0.5137806", "0.51062226", "0.5093556", "0.5093556", "0.5088433", "0.5079323", "0.50690126", "0.5067886", "0.5049035", "0.5046898", "0.50420696", "0.50331473", "0.5030297", "0.5030297", "0.5022431", "0.5016063", "0.49990737", "0.4972949", "0.4968265", "0.4965747", "0.49598062", "0.49484637", "0.49194708", "0.49178702", "0.4905472", "0.49004963", "0.48970413", "0.489522", "0.48909563", "0.4887827", "0.48821267", "0.4881891", "0.48785982", "0.48678872", "0.48652878", "0.48639703", "0.48581663", "0.48568252", "0.4854318", "0.48435727", "0.48408124", "0.48365796", "0.4836489", "0.48329967", "0.4823732", "0.4806852", "0.4805655", "0.48033065", "0.480192", "0.4796162", "0.4775947", "0.47686175", "0.4759035", "0.47575393", "0.47434506", "0.47358102", "0.47347638", "0.47308475", "0.47239348", "0.47207734", "0.47131824", "0.47117183", "0.47061667", "0.4702254", "0.4699498", "0.4697398", "0.46924555", "0.46898118", "0.4685107", "0.46848604", "0.46757394", "0.4671213", "0.46672994", "0.4666417", "0.46534035", "0.4653088", "0.46438026", "0.4637459", "0.4635404", "0.46326485", "0.46303207", "0.46281067", "0.4626461", "0.4609743", "0.45968643", "0.45966214", "0.45938614", "0.45920384", "0.45898995" ]
0.0
-1
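The record above pairs a short description of the DescribeSlowLogRecords operation with its async SDK wrapper. As a minimal sketch of how such a generated wrapper is typically invoked, assuming the standard alibabacloud_dds20151201 package layout and a sync convenience method named describe_slow_log_records; the credentials, endpoint, instance ID, and time range below are placeholders, not values taken from this dataset:

from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_dds20151201.client import Client
from alibabacloud_dds20151201 import models as dds_20151201_models

# Placeholder credentials and endpoint; replace with real values.
config = open_api_models.Config(
    access_key_id='<access-key-id>',
    access_key_secret='<access-key-secret>',
    endpoint='mongodb.aliyuncs.com',
)
client = Client(config)

# DBInstanceId, StartTime, and EndTime are illustrative placeholders.
request = dds_20151201_models.DescribeSlowLogRecordsRequest(
    dbinstance_id='dds-bp1xxxxxxxxxxxxx',
    start_time='2023-01-01T00:00Z',
    end_time='2023-01-01T12:00Z',
)
response = client.describe_slow_log_records(request)
print(response.body)

The same request object could be passed to the async variant shown in the record (describe_slow_log_records_async) from inside an asyncio event loop.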
You can use the custom key obtained by calling the DescribeUserEncryptionKeyList operation to enable TDE. For more information, see [ModifyDBInstanceTDE](~~131267~~).
def describe_user_encryption_key_list_with_options( self, request: dds_20151201_models.DescribeUserEncryptionKeyListRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token if not UtilClient.is_unset(request.target_region_id): query['TargetRegionId'] = request.target_region_id req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='DescribeUserEncryptionKeyList', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.DescribeUserEncryptionKeyListResponse(), self.call_api(params, req, runtime) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def modify_dbinstance_tdewith_options(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.encryption_key):\n query['EncryptionKey'] = request.encryption_key\n if not UtilClient.is_unset(request.encryptor_name):\n query['EncryptorName'] = request.encryptor_name\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_arn):\n query['RoleARN'] = request.role_arn\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.tdestatus):\n query['TDEStatus'] = request.tdestatus\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ModifyDBInstanceTDE',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ModifyDBInstanceTDEResponse(),\n self.call_api(params, req, runtime)\n )", "def describe_dbinstance_encryption_key_with_options(\n self,\n request: dds_20151201_models.DescribeDBInstanceEncryptionKeyRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.encryption_key):\n query['EncryptionKey'] = request.encryption_key\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceEncryptionKey',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse(),\n self.call_api(params, req, runtime)\n )", "async def modify_dbinstance_tdewith_options_async(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n 
if not UtilClient.is_unset(request.encryption_key):\n query['EncryptionKey'] = request.encryption_key\n if not UtilClient.is_unset(request.encryptor_name):\n query['EncryptorName'] = request.encryptor_name\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_arn):\n query['RoleARN'] = request.role_arn\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.tdestatus):\n query['TDEStatus'] = request.tdestatus\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ModifyDBInstanceTDE',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ModifyDBInstanceTDEResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def modify_dbinstance_tde(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n runtime = util_models.RuntimeOptions()\n return self.modify_dbinstance_tdewith_options(request, runtime)", "def create_tsigkey(self, context, tsigkey):\n\n if tsigkey['algorithm'] not in TSIG_SUPPORTED_ALGORITHMS:\n raise exceptions.NotImplemented('Unsupported algorithm')\n\n tsigkey_m = models.TsigKey()\n\n tsigkey_m.update({\n 'designate_id': tsigkey['id'],\n 'name': tsigkey['name'],\n 'algorithm': tsigkey['algorithm'],\n 'secret': base64.b64encode(tsigkey['secret'])\n })\n\n tsigkey_m.save(self.session)\n\n # NOTE(kiall): Prepare and execute query to install this TSIG Key on\n # every domain. 
We use a manual query here since anything\n # else would be impossibly slow.\n query_select = select([\n models.Domain.__table__.c.id,\n \"'TSIG-ALLOW-AXFR'\",\n \"'%s'\" % tsigkey['name']]\n )\n\n columns = [\n models.DomainMetadata.__table__.c.domain_id,\n models.DomainMetadata.__table__.c.kind,\n models.DomainMetadata.__table__.c.content,\n ]\n\n query = InsertFromSelect(models.DomainMetadata.__table__, query_select,\n columns)\n\n # NOTE(kiall): A TX is required for, at the least, SQLite.\n self.session.begin()\n self.session.execute(query)\n self.session.commit()", "async def describe_user_encryption_key_list_with_options_async(\n self,\n request: dds_20151201_models.DescribeUserEncryptionKeyListRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.target_region_id):\n query['TargetRegionId'] = request.target_region_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeUserEncryptionKeyList',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeUserEncryptionKeyListResponse(),\n await self.call_api_async(params, req, runtime)\n )", "async def describe_dbinstance_encryption_key_with_options_async(\n self,\n request: dds_20151201_models.DescribeDBInstanceEncryptionKeyRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.encryption_key):\n query['EncryptionKey'] = request.encryption_key\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceEncryptionKey',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse(),\n await 
self.call_api_async(params, req, runtime)\n )", "def describe_dbinstance_encryption_key(\n self,\n request: dds_20151201_models.DescribeDBInstanceEncryptionKeyRequest,\n ) -> dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_dbinstance_encryption_key_with_options(request, runtime)", "def CreateFromExtendedKey(self,\n wallet_name: str,\n ex_key_str: str) -> HdWalletBase:\n try:\n bip_obj = Bip32Secp256k1.FromExtendedKey(ex_key_str)\n except Bip32KeyError as ex:\n raise ValueError(f\"Invalid extended key: {ex_key_str}\") from ex\n\n # Segwit wallet uses hardened derivation, not supported by public-only objects\n if bip_obj.IsPublicOnly() and self.m_mnemonic_type == HdWalletElectrumV2MnemonicTypes.SEGWIT:\n raise ValueError(\"Only private extended keys are supported for segwit mnemonic type\")\n\n electrum_obj = self.m_electrum_cls(bip_obj)\n return HdWalletElectrumV2(wallet_name=wallet_name,\n electrum_obj=electrum_obj)", "def describe_user_encryption_key_list(\n self,\n request: dds_20151201_models.DescribeUserEncryptionKeyListRequest,\n ) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_user_encryption_key_list_with_options(request, runtime)", "async def modify_dbinstance_tde_async(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n runtime = util_models.RuntimeOptions()\n return await self.modify_dbinstance_tdewith_options_async(request, runtime)", "async def describe_dbinstance_encryption_key_async(\n self,\n request: dds_20151201_models.DescribeDBInstanceEncryptionKeyRequest,\n ) -> dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse:\n runtime = util_models.RuntimeOptions()\n return await self.describe_dbinstance_encryption_key_with_options_async(request, runtime)", "def _derive_key_iv(nonce, user_key, settings):\n if settings.ticketCipher == \"aes128gcm\":\n prf_name = \"sha256\"\n prf_size = 32\n else:\n prf_name = \"sha384\"\n prf_size = 48\n\n # mix the nonce with the key set by user\n secret = bytearray(prf_size)\n secret = secureHMAC(secret, nonce, prf_name)\n secret = derive_secret(secret, bytearray(b'derived'), None, prf_name)\n secret = secureHMAC(secret, user_key, prf_name)\n\n ticket_secret = derive_secret(secret,\n bytearray(b'SessionTicket secret'),\n None, prf_name)\n\n key = HKDF_expand_label(ticket_secret, b\"key\", b\"\", len(user_key),\n prf_name)\n # all AEADs use 12 byte long IV\n iv = HKDF_expand_label(ticket_secret, b\"iv\", b\"\", 12, prf_name)\n return key, iv", "def _derive_key(\n self, passphrase: str, otp: YubikeyOTP, *args : bytes\n ) -> bytes:\n return self._context_kdf.derive(\n combine_keys(\n passphrase.encode('utf-8'),\n otp.token.private_uid,\n *args\n )\n )", "def _get_encryption_key(self, **options):\n\n raise CoreNotImplementedError()", "def describe_dbinstance_tdeinfo_with_options(\n self,\n request: dds_20151201_models.DescribeDBInstanceTDEInfoRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceTDEInfoResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not 
UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceTDEInfo',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstanceTDEInfoResponse(),\n self.call_api(params, req, runtime)\n )", "async def describe_user_encryption_key_list_async(\n self,\n request: dds_20151201_models.DescribeUserEncryptionKeyListRequest,\n ) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse:\n runtime = util_models.RuntimeOptions()\n return await self.describe_user_encryption_key_list_with_options_async(request, runtime)", "def _get_sql_db_tde_disabled_event(com, ext):\n friendly_cloud_type = util.friendly_string(com.get('cloud_type'))\n reference = com.get('reference')\n description = (\n '{} SQL DB {} has TDE disabled.'\n .format(friendly_cloud_type, reference)\n )\n recommendation = (\n 'Check {} SQL DB {} and enable TDE.'\n .format(friendly_cloud_type, reference)\n )\n event_record = {\n # Preserve the extended properties from the virtual\n # machine record because they provide useful context to\n # locate the virtual machine that led to the event.\n 'ext': util.merge_dicts(ext, {\n 'record_type': 'sql_db_tde_event'\n }),\n 'com': {\n 'cloud_type': com.get('cloud_type'),\n 'record_type': 'sql_db_tde_event',\n 'reference': reference,\n 'description': description,\n 'recommendation': recommendation,\n }\n }\n\n _log.info('Generating sql_db_tde_event; %r', event_record)\n yield event_record", "def add(ctx: CLIContext, user_id, resource_policy, admin, inactive, rate_limit):\n with Session() as session:\n try:\n data = session.KeyPair.create(\n user_id,\n is_active=not inactive,\n is_admin=admin,\n resource_policy=resource_policy,\n rate_limit=rate_limit)\n except Exception as e:\n ctx.output.print_mutation_error(\n e,\n item_name='keypair',\n action_name='add',\n )\n sys.exit(1)\n if not data['ok']:\n ctx.output.print_mutation_error(\n msg=data['msg'],\n item_name='keypair',\n action_name='add',\n )\n sys.exit(1)\n ctx.output.print_mutation_result(\n data,\n item_name='keypair',\n extra_info={\n 'access_key': data['keypair']['access_key'],\n 'secret_key': data['keypair']['secret_key'],\n },\n )", "def save_symmetric_key(self, key, user):\n self.temp_passphrase = key\n self.send_request(user, self.KM_TEMP_KEY_ACK)", "def keygen():\n pk, pub = generate_signing_key()\n t = PrettyTable([\"Private (install on your witness node)\",\n \"Public (publish with 'conductor enable' command)\"])\n t.align = \"l\"\n t.add_row([pk, pub])\n\n output(t, '')", "def DeriveKey(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def walletinfo(test_unlock):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect() \n t = PrettyTable([\"Key\", \"Value\"])\n t.align = \"l\"\n t.add_row([\"created\", mph.wallet.created()])\n t.add_row([\"locked\", mph.wallet.locked()])\n t.add_row([\"Number 
of stored keys\", len(mph.wallet.getPublicKeys())])\n t.add_row([\"sql-file\", mph.wallet.keyStorage.sqlDataBaseFile])\n password_storage = mph.config[\"password_storage\"]\n t.add_row([\"password_storage\", password_storage])\n password = os.environ.get(\"UNLOCK\")\n if password is not None:\n t.add_row([\"UNLOCK env set\", \"yes\"])\n else:\n t.add_row([\"UNLOCK env set\", \"no\"])\n if KEYRING_AVAILABLE:\n t.add_row([\"keyring installed\", \"yes\"])\n else:\n t.add_row([\"keyring installed\", \"no\"])\n if test_unlock:\n if unlock_wallet(stm):\n t.add_row([\"Wallet unlock\", \"successful\"])\n else:\n t.add_row([\"Wallet unlock\", \"not working\"])\n # t.add_row([\"getPublicKeys\", str(mph.wallet.getPublicKeys())])\n print(t)", "def _get_decryption_key(self, **options):\n\n raise CoreNotImplementedError()", "def test_create_digital_access_key(self):\n pass", "def _create_fernet_key(self) -> str:\n\n client = boto3.client(\"ssm\", endpoint_url=os.environ.get(\"AWS_ENDPOINT\"))\n\n try:\n response = client.get_parameter(Name=self.object_name, WithDecryption=True)\n return response[\"Parameter\"][\"Value\"]\n except client.exceptions.ParameterNotFound:\n return Fernet.generate_key().decode()", "def crypto_test(tdesc, tpm):\n node_name = tdesc.get('name')\n key = get_attribute(tdesc, 'key')\n if len(key) not in (16, 24, 32):\n raise subcmd.TpmTestError('wrong key size \"%s:%s\"' % (\n node_name,\n ''.join('%2.2x' % ord(x) for x in key)))\n iv = get_attribute(tdesc, 'iv', required=False)\n if iv and len(iv) != 16:\n raise subcmd.TpmTestError('wrong iv size \"%s:%s\"' % (\n node_name,\n ''.join('%2.2x' % ord(x) for x in iv)))\n clear_text = get_attribute(tdesc, 'clear_text')\n if tpm.debug_enabled():\n print('clear text size', len(clear_text))\n cipher_text = get_attribute(tdesc, 'cipher_text', required=False)\n real_cipher_text = crypto_run(node_name, ENCRYPT, key, iv,\n clear_text, cipher_text, tpm)\n crypto_run(node_name, DECRYPT, key, iv, real_cipher_text,\n clear_text, tpm)\n print(utils.cursor_back() + 'SUCCESS: %s' % node_name)", "def _encryptDBPass():\n #run encrypt tool on user given password\n controller.CONF[\"ENCRYPTED_DB_PASS\"] = utils.encryptEngineDBPass(password=controller.CONF[\"DB_PASS\"],\n maskList=masked_value_set)", "def create_key ():", "def createTripleDES(key, IV, implList=None):\r\n if implList == None:\r\n implList = [\"openssl\", \"pycrypto\"]\r\n\r\n for impl in implList:\r\n if impl == \"openssl\" and cryptomath.m2cryptoLoaded:\r\n return openssl_tripledes.new(key, 2, IV)\r\n elif impl == \"pycrypto\" and cryptomath.pycryptoLoaded:\r\n return pycrypto_tripledes.new(key, 2, IV)\r\n raise NotImplementedError()", "def get_data_key(self, encryption_context=None):\n return self.kms_clients[0].generate_data_key(\n KeyId=self.master_key_id,\n KeySpec='AES_256',\n EncryptionContext=encryption_context)", "def CreateFromExtendedKey(self,\n wallet_name: str,\n exkey_str: str) -> HdWallet:\n\n # Create BIP object from extended key\n bip_obj = self.__GetBipClass().FromExtendedKey(exkey_str, self.m_coin_idx)\n\n # Create wallet\n return HdWallet(wallet_name=wallet_name,\n bip_obj=bip_obj)", "def encryption_key(self) -> typing.Optional[aws_cdk.aws_kms.IKey]:\n ...", "def sendKeyEventVirtualMachine(self,node,vmid, key):\n post_data = {'key': str(key)}\n data = self.connect('put',\"nodes/%s/qemu/%s/sendkey\" % (node,vmid), post_data)\n return data", "def gen_tlsauth_key():\n cmd = ['/usr/sbin/openvpn', '--genkey', 'secret', 'ta.tmp']\n ret = subprocess.check_call(cmd)\n with 
open('ta.tmp') as key:\n key = key.read()\n os.remove('ta.tmp')\n return key", "async def describe_dbinstance_tdeinfo_with_options_async(\n self,\n request: dds_20151201_models.DescribeDBInstanceTDEInfoRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceTDEInfoResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceTDEInfo',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstanceTDEInfoResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def disk_encryption_key(self) -> Optional[pulumi.Input['KeyVaultAndSecretReferenceArgs']]:\n return pulumi.get(self, \"disk_encryption_key\")", "def create_crypt_key():\n\n crypt_key = Fernet.generate_key() # key is type = bytes\n\n crypt_query = 'INSERT INTO Crypt (crypt_key) VALUES (%s)'\n my_cursor.execute(crypt_query, (crypt_key,))\n pw_db.commit()", "def generate_secret(self,\n passphrase: str, otpstring: str, key: bytes,\n **kwargs\n ):\n assert self._state is not None, 'Unseal the vault first'\n otp = YubikeyOTP.parse(otpstring, key)\n\n kdf_config = self._vault_kdf.settings.copy()\n kdf_config.update(**kwargs)\n\n assert otp.public_uid not in self._state, \\\n 'This YubiKey is already in use'\n self._state[otp.public_uid] = YKContext.init(\n key=key, passphrase=passphrase, otp=otp,\n **kdf_config\n )", "def encrypt_item(table_name, aws_cmk_id):\n index_key = {\"partition_attribute\": {\"S\": \"is this\"}, \"sort_attribute\": {\"N\": \"55\"}}\n plaintext_item = {\n \"example\": {\"S\": \"data\"},\n \"some numbers\": {\"N\": \"99\"},\n \"and some binary\": {\"B\": b\"\\x00\\x01\\x02\"},\n \"leave me\": {\"S\": \"alone\"}, # We want to ignore this attribute\n }\n # Collect all of the attributes that will be encrypted (used later).\n encrypted_attributes = set(plaintext_item.keys())\n encrypted_attributes.remove(\"leave me\")\n # Collect all of the attributes that will not be encrypted (used later).\n unencrypted_attributes = set(index_key.keys())\n unencrypted_attributes.add(\"leave me\")\n # Add the index pairs to the item.\n plaintext_item.update(index_key)\n\n # Create a normal client.\n client = boto3.client(\"dynamodb\")\n # Create a crypto materials provider using the specified AWS KMS key.\n aws_kms_cmp = AwsKmsCryptographicMaterialsProvider(key_id=aws_cmk_id)\n # Create attribute actions that tells the encrypted client to encrypt all attributes except one.\n actions = AttributeActions(\n default_action=CryptoAction.ENCRYPT_AND_SIGN, attribute_actions={\"leave me\": CryptoAction.DO_NOTHING}\n )\n # Use these objects to create an encrypted client.\n 
encrypted_client = EncryptedClient(client=client, materials_provider=aws_kms_cmp, attribute_actions=actions)\n\n # Put the item to the table, using the encrypted client to transparently encrypt it.\n encrypted_client.put_item(TableName=table_name, Item=plaintext_item)\n\n # Get the encrypted item using the standard client.\n encrypted_item = client.get_item(TableName=table_name, Key=index_key)[\"Item\"]\n\n # Get the item using the encrypted client, transparently decrypting it.\n decrypted_item = encrypted_client.get_item(TableName=table_name, Key=index_key)[\"Item\"]\n\n # Verify that all of the attributes are different in the encrypted item\n for name in encrypted_attributes:\n assert encrypted_item[name] != plaintext_item[name]\n assert decrypted_item[name] == plaintext_item[name]\n\n # Verify that all of the attributes that should not be encrypted were not.\n for name in unencrypted_attributes:\n assert decrypted_item[name] == encrypted_item[name] == plaintext_item[name]\n\n # Clean up the item\n encrypted_client.delete_item(TableName=table_name, Key=index_key)", "def _get_decryption_key(self, **options):\n\n return self._get_encryption_key(**options)", "def key_manager():\n key = DBKey(5, [], 2)\n key.receive_db_key()\n key.send_db_key()\n return key", "def generate_key(self):\n\n self.key = Fernet.generate_key()\n self.cryptor = Fernet(self.key)", "def private_key(self):", "def get_key(self, key_value):\n # Storing the correct key value back to the self.key attributes.\n self.key=key_value\n self.cryptor=Fernet(self.key)", "def TeXKey(self, default=None):\n return self.data.get('texkey', default)", "def describe_dbinstance_tdeinfo(\n self,\n request: dds_20151201_models.DescribeDBInstanceTDEInfoRequest,\n ) -> dds_20151201_models.DescribeDBInstanceTDEInfoResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_dbinstance_tdeinfo_with_options(request, runtime)", "def trustee_keygenerator(request, election, trustee):\n eg_params_json = utils.to_json(ELGAMAL_PARAMS_LD_OBJECT.toJSONDict())\n\n return render_template(request, \"election_keygenerator\", {'eg_params_json': eg_params_json, 'election': election, 'trustee': trustee})", "def devkey():\r\n\r\n keys = []\r\n with open('licence.txt','r') as keyfile:\r\n keys.append(keyfile.read())\r\n keys = keys[0].split('\\n')\r\n\r\n twt = Twitter(keys[0], keys[1], keys[2], keys[3])\r\n\r\n return(twt)", "def createSaltKey(operation,newPassword,newPasswordTag):\n \n newPasswordEncrypted=encrypt(GlobalSaltKeyValue,newPassword)\n \n if os.path.isfile(GlobalKeyVaultFile):\n if checkTag(GlobalKeyVaultFileSection,newPasswordTag):\n if operation == 'update':\n addUpdateTag(newPasswordTag, newPasswordEncrypted)\n print \"Success-Password updated\"\n else:\n print \"Error:0001-Section and password tag already exists.\"\n sys.exit(2)\n\n else:\n if operation == 'add': \n addUpdateTag(newPasswordTag, newPasswordEncrypted)\n print \"Success-Password added\"\n else:\n print \"Error:0002-No matching tag found.\"\n sys.exit(2)\n else:\n print \"Error:0003-Missing file \", GlobalKeyVaultFile\n sys.exit(2)", "def wallet(mnemonics, terra):\n m = mnemonics[0][\"mnemonic\"]\n return terra.wallet(MnemonicKey(m))", "def disable_pki(client, mount_point=\"pki\"):\n client.sys.disable_secrets_engine(mount_point)", "def configure_enable_aes_encryption(device, master_key):\n dialog = Dialog(\n [\n Statement(\n pattern=r\".*New\\s*key.*\",\n action=f\"sendline({master_key})\",\n loop_continue=True,\n continue_timer=False,\n ),\n Statement(\n 
pattern=r\".*Confirm\\s*key.*\",\n action=f\"sendline({master_key})\",\n loop_continue=True,\n continue_timer=False,\n )\n ]\n )\n try:\n device.configure(\"key config-key password-encrypt\", reply=dialog)\n device.configure(\"password encryption aes\")\n except SubCommandFailure as e:\n raise SubCommandFailure(\n \"Could not enables aes password encryption on device {device}.\\nError:\"\n \" {e}\".format(device=device.name, e=str(e))\n )", "def setKey(self, time, attributeIndex, hash, value, view) -> None:\n ...", "def DeriveNextKey(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def update_tsigkey(self, context, tsigkey):\n tsigkey_m = self._get_tsigkey(tsigkey['id'])\n\n # Store a copy of the original name..\n original_name = tsigkey_m.name\n\n tsigkey_m.update({\n 'name': tsigkey['name'],\n 'algorithm': tsigkey['algorithm'],\n 'secret': base64.b64encode(tsigkey['secret'])\n })\n\n tsigkey_m.save(self.session)\n\n # If the name changed, Update the necessary DomainMetadata records\n if original_name != tsigkey['name']:\n self.session.query(models.DomainMetadata)\\\n .filter_by(kind='TSIG-ALLOW-AXFR', content=original_name)\\\n .update(content=tsigkey['name'])", "def generate_key():\n key = ''.join([chr(random.randint(0, 0x10)) for _ in range(block_size)])\n return AES.new(second_key, AES.MODE_ECB).encrypt(pad((key.encode('ascii')), block_size))", "def write_key():\n key = fernet.Fernet.generate_key()\n keyfile = open(KEY_PATH,'wb')\n keyfile.write(key)\n keyfile.close()", "def generate_key():\n key = Fernet.generate_key()\n with open(\"Secret.key\",\"wb\")as key_file:\n key_file.write(key)", "def configure_disable_config_key_encryption(device):\n dialog = Dialog(\n [\n Statement(\n pattern=r\".*Continue\\s*with\\s*master\\s*key\\s*deletion.*\",\n action=\"sendline(yes)\",\n loop_continue=True,\n continue_timer=False,\n )\n ]\n )\n try:\n device.configure(\"no key config-key password-encrypt\", reply=dialog)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n \"Could not remove config-key password encryption on {device}.\\nError: {e}\".format(\n device=device.name, e=str(e))\n )", "def get_key(self):\r\n return self.__encryption_key", "def ecdsaPrivkey(self):\n return SigningKey.from_string(\n string=self.rawPrivkey(), curve=SECP256k1)", "def api_key( self, trans, user_id, **kwd ):\n user = self.get_user( trans, user_id )\n key = self.create_api_key( trans, user )\n return key", "def snapshot_encryption_key(self) -> 'outputs.CustomerEncryptionKeyResponse':\n return pulumi.get(self, \"snapshot_encryption_key\")", "def GetVoucherManagerKeyForIndex(idx):\n return unsigned(kern.globals.iv_global_table[idx].ivgte_key)", "def configure_disable_aes_encryption(device):\n dialog = Dialog(\n [\n Statement(\n pattern=r\".*Continue\\s*with\\s*master\\s*key\\s*deletion.*\",\n action=\"sendline(yes)\",\n loop_continue=True,\n continue_timer=False,\n )\n ]\n )\n try:\n device.configure(\"no key config-key password-encrypt\", reply=dialog)\n device.configure(\"no password encryption aes\")\n except SubCommandFailure as e:\n raise SubCommandFailure(\n \"Could not remove aes password encryption on {device}.\\nError: {e}\".format(\n device=device.name, e=str(e))\n )", "def test_disabled(self, monkeypatch):\n monkeypatch.setenv('ENABLE_AUTO_EXPIRE', 'true')\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 
1, 2, tzinfo=datetime.timezone.utc)\n key = Key('user2', 'ldasfkk', 'Inactive', created, last_used)\n key.audit(10, 11, 10, 8)\n assert key.audit_state == 'disabled'\n key.audit(60, 80, 30, 20)\n assert key.audit_state == 'disabled'", "def ed25519_private_key(ctx):\n\n key = ed25519.Ed25519PrivateKey.generate()\n\n ctx.data = str(\n key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.PKCS8,\n encryption_algorithm=serialization.NoEncryption(),\n ),\n \"utf-8\",\n )", "def addkey(unsafe_import_key):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if not unlock_wallet(stm):\n return\n if not unsafe_import_key:\n unsafe_import_key = click.prompt(\"Enter private key\", confirmation_prompt=False, hide_input=True)\n mph.wallet.addPrivateKey(unsafe_import_key)\n set_shared_morphene_instance(stm)", "def forge_public_key(value) -> bytes:\n prefix = value[:4]\n res = base58.b58decode_check(value)[4:]\n\n if prefix == 'edpk':\n return b'\\x00' + res\n elif prefix == 'sppk':\n return b'\\x01' + res\n elif prefix == 'p2pk':\n return b'\\x02' + res\n\n raise ValueError(f'Unrecognized key type: #{prefix}')", "def private_key(self, seed: str) -> str:\n return nanopy.deterministic_key(seed, self.account_index)[0]", "def database_encryption(self) -> 'outputs.DatabaseEncryptionResponse':\n return pulumi.get(self, \"database_encryption\")", "def reveal_seed():\n password = getpass.getpass('Password from keystore: ') # Prompt the user for a password of keystore file\n\n configuration = Configuration().load_configuration()\n api = get_api()\n\n try:\n wallet = api.get_private_key(configuration, password)\n click.echo('Account prv key: %s' % str(wallet.get_private_key().hex()))\n\n except InvalidPasswordException:\n click.echo('Incorrect password!')", "def encryption_key(self) -> bytearray:\n # Handle if encryption is disabled.\n if self.aes_on == 0:\n return None\n # Encryption is enabled so read the key and return it.\n key = bytearray(16)\n self._read_into(_REG_AES_KEY1, key)\n return key", "def key_encryption_key(self) -> Optional[pulumi.Input['KeyVaultAndKeyReferenceArgs']]:\n return pulumi.get(self, \"key_encryption_key\")", "def set_encryption(key):\n global_scope['enc'] = Encryption(key.encode())", "def encryption_oracle(pt):\n\n key = rand_bytes(16)\n iv = rand_bytes(16) # In case the mode is CBC. 
Generate this before\n # choosing the mode to protect against timing attacks.\n padded_pt = rand_bytes_range(5, 10) + pt + rand_bytes_range(5, 10)\n if random.randint(0, 1) == 0:\n # print True # Uncomment to check the oracle detector\n return aes_ecb_encrypt(key, padded_pt)\n else:\n # print False # Uncomment to check the oracle detector\n return aes_cbc_encrypt(key, padded_pt, iv)", "def generate_symmetric_key():\n return Fernet.generate_key()", "def get_otp(self, key):\n packed = self.pack()\n obj = AES.new(key, AES.MODE_ECB)\n ciphertext = obj.encrypt(packed)\n return ciphertext", "def generate(self):\n if self.curvetype == KeyType.ECDSA_P256v1:\n self.private_key_obj = ec.generate_private_key(ec.SECP256R1(), default_backend())\n elif self.curvetype == KeyType.ECDSA_SECP256k1:\n self.private_key_obj = ec.generate_private_key(ec.SECP256K1(), default_backend())\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_private_key_bytes()\n self._get_naive_public_key_bytes()", "def AddPrivateKeyFlag(parser, required=False):\n help_text = \"\"\"\\\n Unencrypted PKCS#1 or PKCS#8 PEM-encoded private key associated with\n the Client Certificate. Database Migration Service encrypts the value when\n storing it.\n \"\"\"\n parser.add_argument('--private-key', help=help_text, required=required)", "def set_key(self, key, transpose=False):\n if transpose:\n raise NotImplementedError('transpose not implemented')\n\n self._menu_select('Edit->Key Signature')\n\n key_dialog = self._app.window(class_name='TKEY')\n key_dialog.wait('ready')\n key_dialog.TComboBox1.select(key)\n key_dialog.TRadioButton4.click() # No Transpose\n key_dialog.TButton3.click() # OK\n self.wait_ready()", "def test_ec_no(self):\n key = c.KEY_EC\n usage = [\n c.KU_KEYENCIPHERMENT,\n c.KU_DATAENCIPHERMENT,\n ]\n self.assertFalse(utils.check_key_usage(key, usage))", "def _get_encryption_key(self, **options):\n\n return self._public_key", "def test_from_alt_text(self):\n rkeyring = dns.tsigkeyring.from_text(alt_text_keyring)\n self.assertEqual(rkeyring, rich_keyring)", "def deactive_key(iam_username):\n\n try:\n previous_secret_value = secretmanager.get_secret_value(\n SecretId=iam_username, VersionStage=\"AWSPREVIOUS\"\n )\n previous_secret_data = json.loads(previous_secret_value[\"SecretString\"])\n previous_access_key = previous_secret_data[\"AccessKey\"]\n\n \n print(\n f\"deactivating access key {previous_access_key} \"\n f\"for IAM user {iam_username}\"\n )\n\n iam.update_access_key(\n AccessKeyId=previous_access_key, Status=\"Inactive\", UserName=iam_username\n )\n\n emailmsg = f\"Hello,\\n\\n\" f\"The previous access key {previous_access_key}\"\n\n emailmsg = (\n f\"{emailmsg} has been disabled for {iam_username}.\\n\\n\"\n f\"This key will be deleted in the next 14 days. 
\"\n f\"If your application has lost access, be sure to update the \"\n f\"access key.\\n You can find the new key by looking up the secret \"\n f'\"{iam_username}\" under secrets manager via AWS Console '\n f\"in {AWS_REGION_NAME}.\\n\\n\"\n )\n\n sns.publish(\n TopicArn=SNS_TOPIC_ARN,\n Message=emailmsg,\n Subject=\"AWS Access Key Rotation: Previous key deactivated for \"\n f\"{iam_username}\",\n )\n print(\"Access Key has been deacivated\")\n return {\"status\": 200}\n except ClientError as e:\n print(e)\n return {\"status\": 500}", "def _get_decryption_key(self, **options):\n\n return self._private_key", "def _configEncryptedPass():\n try:\n utils.configEncryptedPass(controller.CONF[\"ENCRYPTED_DB_PASS\"])\n except:\n logging.error(\"ERROR Editing engine local configuration file.\")\n logging.error(traceback.format_exc())\n raise Exception(output_messages.ERR_EXP_FAILED_CONFIG_ENGINE)", "def test_rekey_non_encrypted(self):\n with pytest.raises(EncryptionError):\n rekey('message', old_key=b'0' * 32, new_key=b'1' * 32)", "def Elevate(self):\n self.Send(self.EncryptString('elevate\\n'))\n print self.DecryptString(self.Recv(4096))\n\n self.Send(self.EncryptString(self.flag_2))\n print self.DecryptString(self.Recv(4096))\n\n self.Send(self.EncryptString('RocketDonkey\\n'))\n print self.DecryptString(self.Recv(4096))", "def test_secretbox_enc_dec(test_data, minion_opts):\n # Store the data\n with patch(\"salt.runners.nacl.__opts__\", minion_opts, create=True):\n ret = nacl.keygen()\n assert \"pk\" in ret\n assert \"sk\" in ret\n pk = ret[\"pk\"]\n sk = ret[\"sk\"]\n\n # Encrypt with pk\n encrypted_data = nacl.secretbox_encrypt(\n data=test_data,\n sk=sk,\n )\n\n # Decrypt with sk\n ret = nacl.secretbox_decrypt(\n data=encrypted_data,\n sk=sk,\n )\n assert test_data == ret", "def setPassphrase( self , passphrase ):\n\t\tself.passphrase\t= passphrase\n\t\t\n\t\t# Generate and log the generated PMK.\n\t\tself.PMK = pbkdf2_bin( self.passphrase , self.ssid , 4096 , 32 )\n\t\tself.logger.logKey( 'Pairwise Master Key' , self.PMK )", "def set_tokenterminal_key(\n key: str, persist: bool = False, show_output: bool = False\n) -> str:\n handle_credential(\"API_TOKEN_TERMINAL_KEY\", key, persist)\n return check_tokenterminal_key(show_output)", "def encryption_key(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"encryption_key\")", "def configure_service_password_encryption(device):\n\n try:\n device.configure(\"service password-encryption\")\n except SubCommandFailure:\n raise SubCommandFailure(\n \"Could not configure service password encryption\"\n )", "def dh_get_key():\n G = EcGroup()\n priv_dec = G.order().random()\n pub_enc = priv_dec * G.generator()\n return (G, priv_dec, pub_enc)", "def encrypt(self, key, value):\n\n iv = ''.join(chr(random.randint(0, 0xFF)) for i in range(16))\n key = hashlib.sha256(key).digest()[:self.BLOCK_SIZE]\n cipher = AES.new(key, AES.MODE_CBC, iv)\n crypted = cipher.encrypt(self.pkcs5_pad(value))\n return iv+crypted", "def setup_service_key():\n if get_var('AFS_AKIMPERSONATE'):\n keytab = get_var('KRB_AFS_KEYTAB')\n if keytab and not os.path.exists(keytab):\n cell = get_var('AFS_CELL')\n realm = get_var('KRB_REALM')\n enctype = get_var('KRB_AFS_ENCTYPE')\n _KeytabKeywords().create_service_keytab(keytab, cell, realm, enctype, akimpersonate=True)\n if get_var('AFS_KEY_FILE') == 'KeyFile':\n run_keyword(\"Create Key File\")\n elif get_var('AFS_KEY_FILE') == 'rxkad.keytab':\n run_keyword(\"Install rxkad-k5 Keytab\")\n elif get_var('AFS_KEY_FILE') == 
'KeyFileExt':\n run_keyword(\"Create Extended Key File\", get_var('KRB_AFS_ENCTYPE'))\n else:\n raise AssertionError(\"Unsupported AFS_KEY_FILE! %s\" % (get_var('AFS_KEY_FILE')))", "def test_rekey_defaults(self, settings):\n old_key = b'0' * 32\n new_key = b'1' * 32\n\n settings.CHITON_ENCRYPTION_KEY = new_key\n settings.CHITON_PREVIOUS_ENCRYPTION_KEY = old_key\n\n encrypted = encrypt('message', key=old_key)\n rekeyed = rekey(encrypted)\n\n assert decrypt(rekeyed) == 'message'", "def get_key(self, user):\r\n from delicious_cake.models import ApiKey\r\n\r\n try:\r\n key = ApiKey.objects.get(user=user)\r\n except ApiKey.DoesNotExist:\r\n return False\r\n\r\n return key.key" ]
[ "0.6331765", "0.5759311", "0.56402856", "0.55784595", "0.55656636", "0.5502756", "0.54750466", "0.5373099", "0.5144797", "0.5122141", "0.5041896", "0.5034966", "0.49652913", "0.49571082", "0.49031255", "0.48929504", "0.47773162", "0.47277826", "0.47213864", "0.46845135", "0.46508694", "0.4631471", "0.4613264", "0.46118814", "0.45919737", "0.45791653", "0.45643374", "0.45619097", "0.45438543", "0.45341885", "0.45315954", "0.45201147", "0.45122862", "0.4506182", "0.45022428", "0.44969547", "0.44802934", "0.4469784", "0.44684833", "0.44677836", "0.44434616", "0.4431479", "0.44282323", "0.44245413", "0.4423096", "0.44040415", "0.43914306", "0.43884116", "0.43864217", "0.43814126", "0.43806857", "0.43795836", "0.43782127", "0.436287", "0.436207", "0.43507287", "0.43263394", "0.43254805", "0.43204176", "0.43151617", "0.4310474", "0.43086183", "0.42965767", "0.42931113", "0.4291386", "0.42904833", "0.42854962", "0.42842814", "0.42830905", "0.4277821", "0.42733228", "0.42719632", "0.42659682", "0.42619833", "0.42494163", "0.4245101", "0.42444152", "0.42407292", "0.42404377", "0.4239527", "0.42350894", "0.42322585", "0.42317134", "0.4227303", "0.42264822", "0.42255625", "0.42220634", "0.4199288", "0.41987932", "0.41940632", "0.418726", "0.41835597", "0.41829175", "0.4181796", "0.4181173", "0.41798377", "0.41703123", "0.4169756", "0.41675168", "0.41669336" ]
0.57961434
1
You can use the custom key obtained by calling the DescribeUserEncryptionKeyList operation to enable TDE. For more information, see [ModifyDBInstanceTDE](~~131267~~).
async def describe_user_encryption_key_list_with_options_async(
    self,
    request: dds_20151201_models.DescribeUserEncryptionKeyListRequest,
    runtime: util_models.RuntimeOptions,
) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse:
    UtilClient.validate_model(request)
    query = {}
    if not UtilClient.is_unset(request.dbinstance_id):
        query['DBInstanceId'] = request.dbinstance_id
    if not UtilClient.is_unset(request.owner_account):
        query['OwnerAccount'] = request.owner_account
    if not UtilClient.is_unset(request.owner_id):
        query['OwnerId'] = request.owner_id
    if not UtilClient.is_unset(request.resource_owner_account):
        query['ResourceOwnerAccount'] = request.resource_owner_account
    if not UtilClient.is_unset(request.resource_owner_id):
        query['ResourceOwnerId'] = request.resource_owner_id
    if not UtilClient.is_unset(request.security_token):
        query['SecurityToken'] = request.security_token
    if not UtilClient.is_unset(request.target_region_id):
        query['TargetRegionId'] = request.target_region_id
    req = open_api_models.OpenApiRequest(
        query=OpenApiUtilClient.query(query)
    )
    params = open_api_models.Params(
        action='DescribeUserEncryptionKeyList',
        version='2015-12-01',
        protocol='HTTPS',
        pathname='/',
        method='POST',
        auth_type='AK',
        style='RPC',
        req_body_type='formData',
        body_type='json'
    )
    return TeaCore.from_map(
        dds_20151201_models.DescribeUserEncryptionKeyListResponse(),
        await self.call_api_async(params, req, runtime)
    )
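A minimal usage sketch for the async method above: it builds a DescribeUserEncryptionKeyListRequest and awaits describe_user_encryption_key_list_with_options_async on an SDK client. Only the request model name and the method call come from the code above; the import paths, client construction, credentials, endpoint, and instance ID are illustrative assumptions based on the typical alibabacloud_* package layout.

import asyncio

from alibabacloud_dds20151201.client import Client
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util import models as util_models


async def main():
    # Assumed client setup; the endpoint and credentials are placeholders.
    config = open_api_models.Config(
        access_key_id='<your-access-key-id>',
        access_key_secret='<your-access-key-secret>',
        endpoint='mongodb.aliyuncs.com'
    )
    client = Client(config)
    # 'dds-bp*****' is a hypothetical instance ID.
    request = dds_20151201_models.DescribeUserEncryptionKeyListRequest(
        dbinstance_id='dds-bp*****'
    )
    runtime = util_models.RuntimeOptions()
    # Await the async variant shown above and print the returned key list.
    response = await client.describe_user_encryption_key_list_with_options_async(request, runtime)
    print(response.body)


if __name__ == '__main__':
    asyncio.run(main())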
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def modify_dbinstance_tdewith_options(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.encryption_key):\n query['EncryptionKey'] = request.encryption_key\n if not UtilClient.is_unset(request.encryptor_name):\n query['EncryptorName'] = request.encryptor_name\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_arn):\n query['RoleARN'] = request.role_arn\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.tdestatus):\n query['TDEStatus'] = request.tdestatus\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ModifyDBInstanceTDE',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ModifyDBInstanceTDEResponse(),\n self.call_api(params, req, runtime)\n )", "def describe_user_encryption_key_list_with_options(\n self,\n request: dds_20151201_models.DescribeUserEncryptionKeyListRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.target_region_id):\n query['TargetRegionId'] = request.target_region_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeUserEncryptionKeyList',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeUserEncryptionKeyListResponse(),\n self.call_api(params, req, runtime)\n )", "def describe_dbinstance_encryption_key_with_options(\n self,\n request: dds_20151201_models.DescribeDBInstanceEncryptionKeyRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = 
request.dbinstance_id\n if not UtilClient.is_unset(request.encryption_key):\n query['EncryptionKey'] = request.encryption_key\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceEncryptionKey',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse(),\n self.call_api(params, req, runtime)\n )", "async def modify_dbinstance_tdewith_options_async(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.encryption_key):\n query['EncryptionKey'] = request.encryption_key\n if not UtilClient.is_unset(request.encryptor_name):\n query['EncryptorName'] = request.encryptor_name\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_arn):\n query['RoleARN'] = request.role_arn\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.tdestatus):\n query['TDEStatus'] = request.tdestatus\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ModifyDBInstanceTDE',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ModifyDBInstanceTDEResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def modify_dbinstance_tde(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n runtime = util_models.RuntimeOptions()\n return self.modify_dbinstance_tdewith_options(request, runtime)", "def create_tsigkey(self, context, tsigkey):\n\n if tsigkey['algorithm'] not in TSIG_SUPPORTED_ALGORITHMS:\n raise exceptions.NotImplemented('Unsupported algorithm')\n\n tsigkey_m = models.TsigKey()\n\n tsigkey_m.update({\n 'designate_id': tsigkey['id'],\n 'name': tsigkey['name'],\n 'algorithm': tsigkey['algorithm'],\n 'secret': base64.b64encode(tsigkey['secret'])\n })\n\n tsigkey_m.save(self.session)\n\n # NOTE(kiall): Prepare and execute query to 
install this TSIG Key on\n # every domain. We use a manual query here since anything\n # else would be impossibly slow.\n query_select = select([\n models.Domain.__table__.c.id,\n \"'TSIG-ALLOW-AXFR'\",\n \"'%s'\" % tsigkey['name']]\n )\n\n columns = [\n models.DomainMetadata.__table__.c.domain_id,\n models.DomainMetadata.__table__.c.kind,\n models.DomainMetadata.__table__.c.content,\n ]\n\n query = InsertFromSelect(models.DomainMetadata.__table__, query_select,\n columns)\n\n # NOTE(kiall): A TX is required for, at the least, SQLite.\n self.session.begin()\n self.session.execute(query)\n self.session.commit()", "async def describe_dbinstance_encryption_key_with_options_async(\n self,\n request: dds_20151201_models.DescribeDBInstanceEncryptionKeyRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.encryption_key):\n query['EncryptionKey'] = request.encryption_key\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceEncryptionKey',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def describe_dbinstance_encryption_key(\n self,\n request: dds_20151201_models.DescribeDBInstanceEncryptionKeyRequest,\n ) -> dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_dbinstance_encryption_key_with_options(request, runtime)", "def CreateFromExtendedKey(self,\n wallet_name: str,\n ex_key_str: str) -> HdWalletBase:\n try:\n bip_obj = Bip32Secp256k1.FromExtendedKey(ex_key_str)\n except Bip32KeyError as ex:\n raise ValueError(f\"Invalid extended key: {ex_key_str}\") from ex\n\n # Segwit wallet uses hardened derivation, not supported by public-only objects\n if bip_obj.IsPublicOnly() and self.m_mnemonic_type == HdWalletElectrumV2MnemonicTypes.SEGWIT:\n raise ValueError(\"Only private extended keys are supported for segwit mnemonic type\")\n\n electrum_obj = self.m_electrum_cls(bip_obj)\n return HdWalletElectrumV2(wallet_name=wallet_name,\n electrum_obj=electrum_obj)", "def describe_user_encryption_key_list(\n self,\n request: dds_20151201_models.DescribeUserEncryptionKeyListRequest,\n ) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_user_encryption_key_list_with_options(request, runtime)", "async def modify_dbinstance_tde_async(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n ) -> 
dds_20151201_models.ModifyDBInstanceTDEResponse:\n runtime = util_models.RuntimeOptions()\n return await self.modify_dbinstance_tdewith_options_async(request, runtime)", "async def describe_dbinstance_encryption_key_async(\n self,\n request: dds_20151201_models.DescribeDBInstanceEncryptionKeyRequest,\n ) -> dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse:\n runtime = util_models.RuntimeOptions()\n return await self.describe_dbinstance_encryption_key_with_options_async(request, runtime)", "def _derive_key_iv(nonce, user_key, settings):\n if settings.ticketCipher == \"aes128gcm\":\n prf_name = \"sha256\"\n prf_size = 32\n else:\n prf_name = \"sha384\"\n prf_size = 48\n\n # mix the nonce with the key set by user\n secret = bytearray(prf_size)\n secret = secureHMAC(secret, nonce, prf_name)\n secret = derive_secret(secret, bytearray(b'derived'), None, prf_name)\n secret = secureHMAC(secret, user_key, prf_name)\n\n ticket_secret = derive_secret(secret,\n bytearray(b'SessionTicket secret'),\n None, prf_name)\n\n key = HKDF_expand_label(ticket_secret, b\"key\", b\"\", len(user_key),\n prf_name)\n # all AEADs use 12 byte long IV\n iv = HKDF_expand_label(ticket_secret, b\"iv\", b\"\", 12, prf_name)\n return key, iv", "def _derive_key(\n self, passphrase: str, otp: YubikeyOTP, *args : bytes\n ) -> bytes:\n return self._context_kdf.derive(\n combine_keys(\n passphrase.encode('utf-8'),\n otp.token.private_uid,\n *args\n )\n )", "def _get_encryption_key(self, **options):\n\n raise CoreNotImplementedError()", "def describe_dbinstance_tdeinfo_with_options(\n self,\n request: dds_20151201_models.DescribeDBInstanceTDEInfoRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceTDEInfoResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceTDEInfo',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstanceTDEInfoResponse(),\n self.call_api(params, req, runtime)\n )", "async def describe_user_encryption_key_list_async(\n self,\n request: dds_20151201_models.DescribeUserEncryptionKeyListRequest,\n ) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse:\n runtime = util_models.RuntimeOptions()\n return await self.describe_user_encryption_key_list_with_options_async(request, runtime)", "def _get_sql_db_tde_disabled_event(com, ext):\n friendly_cloud_type = util.friendly_string(com.get('cloud_type'))\n reference = com.get('reference')\n description = (\n '{} SQL DB {} has TDE disabled.'\n .format(friendly_cloud_type, reference)\n )\n recommendation = (\n 'Check {} SQL DB {} and enable TDE.'\n .format(friendly_cloud_type, 
reference)\n )\n event_record = {\n # Preserve the extended properties from the virtual\n # machine record because they provide useful context to\n # locate the virtual machine that led to the event.\n 'ext': util.merge_dicts(ext, {\n 'record_type': 'sql_db_tde_event'\n }),\n 'com': {\n 'cloud_type': com.get('cloud_type'),\n 'record_type': 'sql_db_tde_event',\n 'reference': reference,\n 'description': description,\n 'recommendation': recommendation,\n }\n }\n\n _log.info('Generating sql_db_tde_event; %r', event_record)\n yield event_record", "def add(ctx: CLIContext, user_id, resource_policy, admin, inactive, rate_limit):\n with Session() as session:\n try:\n data = session.KeyPair.create(\n user_id,\n is_active=not inactive,\n is_admin=admin,\n resource_policy=resource_policy,\n rate_limit=rate_limit)\n except Exception as e:\n ctx.output.print_mutation_error(\n e,\n item_name='keypair',\n action_name='add',\n )\n sys.exit(1)\n if not data['ok']:\n ctx.output.print_mutation_error(\n msg=data['msg'],\n item_name='keypair',\n action_name='add',\n )\n sys.exit(1)\n ctx.output.print_mutation_result(\n data,\n item_name='keypair',\n extra_info={\n 'access_key': data['keypair']['access_key'],\n 'secret_key': data['keypair']['secret_key'],\n },\n )", "def save_symmetric_key(self, key, user):\n self.temp_passphrase = key\n self.send_request(user, self.KM_TEMP_KEY_ACK)", "def keygen():\n pk, pub = generate_signing_key()\n t = PrettyTable([\"Private (install on your witness node)\",\n \"Public (publish with 'conductor enable' command)\"])\n t.align = \"l\"\n t.add_row([pk, pub])\n\n output(t, '')", "def DeriveKey(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def _get_decryption_key(self, **options):\n\n raise CoreNotImplementedError()", "def walletinfo(test_unlock):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect() \n t = PrettyTable([\"Key\", \"Value\"])\n t.align = \"l\"\n t.add_row([\"created\", mph.wallet.created()])\n t.add_row([\"locked\", mph.wallet.locked()])\n t.add_row([\"Number of stored keys\", len(mph.wallet.getPublicKeys())])\n t.add_row([\"sql-file\", mph.wallet.keyStorage.sqlDataBaseFile])\n password_storage = mph.config[\"password_storage\"]\n t.add_row([\"password_storage\", password_storage])\n password = os.environ.get(\"UNLOCK\")\n if password is not None:\n t.add_row([\"UNLOCK env set\", \"yes\"])\n else:\n t.add_row([\"UNLOCK env set\", \"no\"])\n if KEYRING_AVAILABLE:\n t.add_row([\"keyring installed\", \"yes\"])\n else:\n t.add_row([\"keyring installed\", \"no\"])\n if test_unlock:\n if unlock_wallet(stm):\n t.add_row([\"Wallet unlock\", \"successful\"])\n else:\n t.add_row([\"Wallet unlock\", \"not working\"])\n # t.add_row([\"getPublicKeys\", str(mph.wallet.getPublicKeys())])\n print(t)", "def test_create_digital_access_key(self):\n pass", "def _create_fernet_key(self) -> str:\n\n client = boto3.client(\"ssm\", endpoint_url=os.environ.get(\"AWS_ENDPOINT\"))\n\n try:\n response = client.get_parameter(Name=self.object_name, WithDecryption=True)\n return response[\"Parameter\"][\"Value\"]\n except client.exceptions.ParameterNotFound:\n return Fernet.generate_key().decode()", "def crypto_test(tdesc, tpm):\n node_name = tdesc.get('name')\n key = get_attribute(tdesc, 'key')\n if len(key) not in (16, 24, 32):\n raise subcmd.TpmTestError('wrong key size \"%s:%s\"' % (\n node_name,\n ''.join('%2.2x' % ord(x) for x 
in key)))\n iv = get_attribute(tdesc, 'iv', required=False)\n if iv and len(iv) != 16:\n raise subcmd.TpmTestError('wrong iv size \"%s:%s\"' % (\n node_name,\n ''.join('%2.2x' % ord(x) for x in iv)))\n clear_text = get_attribute(tdesc, 'clear_text')\n if tpm.debug_enabled():\n print('clear text size', len(clear_text))\n cipher_text = get_attribute(tdesc, 'cipher_text', required=False)\n real_cipher_text = crypto_run(node_name, ENCRYPT, key, iv,\n clear_text, cipher_text, tpm)\n crypto_run(node_name, DECRYPT, key, iv, real_cipher_text,\n clear_text, tpm)\n print(utils.cursor_back() + 'SUCCESS: %s' % node_name)", "def _encryptDBPass():\n #run encrypt tool on user given password\n controller.CONF[\"ENCRYPTED_DB_PASS\"] = utils.encryptEngineDBPass(password=controller.CONF[\"DB_PASS\"],\n maskList=masked_value_set)", "def create_key ():", "def get_data_key(self, encryption_context=None):\n return self.kms_clients[0].generate_data_key(\n KeyId=self.master_key_id,\n KeySpec='AES_256',\n EncryptionContext=encryption_context)", "def createTripleDES(key, IV, implList=None):\r\n if implList == None:\r\n implList = [\"openssl\", \"pycrypto\"]\r\n\r\n for impl in implList:\r\n if impl == \"openssl\" and cryptomath.m2cryptoLoaded:\r\n return openssl_tripledes.new(key, 2, IV)\r\n elif impl == \"pycrypto\" and cryptomath.pycryptoLoaded:\r\n return pycrypto_tripledes.new(key, 2, IV)\r\n raise NotImplementedError()", "def CreateFromExtendedKey(self,\n wallet_name: str,\n exkey_str: str) -> HdWallet:\n\n # Create BIP object from extended key\n bip_obj = self.__GetBipClass().FromExtendedKey(exkey_str, self.m_coin_idx)\n\n # Create wallet\n return HdWallet(wallet_name=wallet_name,\n bip_obj=bip_obj)", "def encryption_key(self) -> typing.Optional[aws_cdk.aws_kms.IKey]:\n ...", "def sendKeyEventVirtualMachine(self,node,vmid, key):\n post_data = {'key': str(key)}\n data = self.connect('put',\"nodes/%s/qemu/%s/sendkey\" % (node,vmid), post_data)\n return data", "def gen_tlsauth_key():\n cmd = ['/usr/sbin/openvpn', '--genkey', 'secret', 'ta.tmp']\n ret = subprocess.check_call(cmd)\n with open('ta.tmp') as key:\n key = key.read()\n os.remove('ta.tmp')\n return key", "async def describe_dbinstance_tdeinfo_with_options_async(\n self,\n request: dds_20151201_models.DescribeDBInstanceTDEInfoRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceTDEInfoResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceTDEInfo',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstanceTDEInfoResponse(),\n await self.call_api_async(params, req, runtime)\n )", 
"def disk_encryption_key(self) -> Optional[pulumi.Input['KeyVaultAndSecretReferenceArgs']]:\n return pulumi.get(self, \"disk_encryption_key\")", "def create_crypt_key():\n\n crypt_key = Fernet.generate_key() # key is type = bytes\n\n crypt_query = 'INSERT INTO Crypt (crypt_key) VALUES (%s)'\n my_cursor.execute(crypt_query, (crypt_key,))\n pw_db.commit()", "def generate_secret(self,\n passphrase: str, otpstring: str, key: bytes,\n **kwargs\n ):\n assert self._state is not None, 'Unseal the vault first'\n otp = YubikeyOTP.parse(otpstring, key)\n\n kdf_config = self._vault_kdf.settings.copy()\n kdf_config.update(**kwargs)\n\n assert otp.public_uid not in self._state, \\\n 'This YubiKey is already in use'\n self._state[otp.public_uid] = YKContext.init(\n key=key, passphrase=passphrase, otp=otp,\n **kdf_config\n )", "def encrypt_item(table_name, aws_cmk_id):\n index_key = {\"partition_attribute\": {\"S\": \"is this\"}, \"sort_attribute\": {\"N\": \"55\"}}\n plaintext_item = {\n \"example\": {\"S\": \"data\"},\n \"some numbers\": {\"N\": \"99\"},\n \"and some binary\": {\"B\": b\"\\x00\\x01\\x02\"},\n \"leave me\": {\"S\": \"alone\"}, # We want to ignore this attribute\n }\n # Collect all of the attributes that will be encrypted (used later).\n encrypted_attributes = set(plaintext_item.keys())\n encrypted_attributes.remove(\"leave me\")\n # Collect all of the attributes that will not be encrypted (used later).\n unencrypted_attributes = set(index_key.keys())\n unencrypted_attributes.add(\"leave me\")\n # Add the index pairs to the item.\n plaintext_item.update(index_key)\n\n # Create a normal client.\n client = boto3.client(\"dynamodb\")\n # Create a crypto materials provider using the specified AWS KMS key.\n aws_kms_cmp = AwsKmsCryptographicMaterialsProvider(key_id=aws_cmk_id)\n # Create attribute actions that tells the encrypted client to encrypt all attributes except one.\n actions = AttributeActions(\n default_action=CryptoAction.ENCRYPT_AND_SIGN, attribute_actions={\"leave me\": CryptoAction.DO_NOTHING}\n )\n # Use these objects to create an encrypted client.\n encrypted_client = EncryptedClient(client=client, materials_provider=aws_kms_cmp, attribute_actions=actions)\n\n # Put the item to the table, using the encrypted client to transparently encrypt it.\n encrypted_client.put_item(TableName=table_name, Item=plaintext_item)\n\n # Get the encrypted item using the standard client.\n encrypted_item = client.get_item(TableName=table_name, Key=index_key)[\"Item\"]\n\n # Get the item using the encrypted client, transparently decrypting it.\n decrypted_item = encrypted_client.get_item(TableName=table_name, Key=index_key)[\"Item\"]\n\n # Verify that all of the attributes are different in the encrypted item\n for name in encrypted_attributes:\n assert encrypted_item[name] != plaintext_item[name]\n assert decrypted_item[name] == plaintext_item[name]\n\n # Verify that all of the attributes that should not be encrypted were not.\n for name in unencrypted_attributes:\n assert decrypted_item[name] == encrypted_item[name] == plaintext_item[name]\n\n # Clean up the item\n encrypted_client.delete_item(TableName=table_name, Key=index_key)", "def _get_decryption_key(self, **options):\n\n return self._get_encryption_key(**options)", "def key_manager():\n key = DBKey(5, [], 2)\n key.receive_db_key()\n key.send_db_key()\n return key", "def generate_key(self):\n\n self.key = Fernet.generate_key()\n self.cryptor = Fernet(self.key)", "def private_key(self):", "def get_key(self, key_value):\n # Storing the correct 
key value back to the self.key attributes.\n self.key=key_value\n self.cryptor=Fernet(self.key)", "def TeXKey(self, default=None):\n return self.data.get('texkey', default)", "def describe_dbinstance_tdeinfo(\n self,\n request: dds_20151201_models.DescribeDBInstanceTDEInfoRequest,\n ) -> dds_20151201_models.DescribeDBInstanceTDEInfoResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_dbinstance_tdeinfo_with_options(request, runtime)", "def trustee_keygenerator(request, election, trustee):\n eg_params_json = utils.to_json(ELGAMAL_PARAMS_LD_OBJECT.toJSONDict())\n\n return render_template(request, \"election_keygenerator\", {'eg_params_json': eg_params_json, 'election': election, 'trustee': trustee})", "def devkey():\r\n\r\n keys = []\r\n with open('licence.txt','r') as keyfile:\r\n keys.append(keyfile.read())\r\n keys = keys[0].split('\\n')\r\n\r\n twt = Twitter(keys[0], keys[1], keys[2], keys[3])\r\n\r\n return(twt)", "def createSaltKey(operation,newPassword,newPasswordTag):\n \n newPasswordEncrypted=encrypt(GlobalSaltKeyValue,newPassword)\n \n if os.path.isfile(GlobalKeyVaultFile):\n if checkTag(GlobalKeyVaultFileSection,newPasswordTag):\n if operation == 'update':\n addUpdateTag(newPasswordTag, newPasswordEncrypted)\n print \"Success-Password updated\"\n else:\n print \"Error:0001-Section and password tag already exists.\"\n sys.exit(2)\n\n else:\n if operation == 'add': \n addUpdateTag(newPasswordTag, newPasswordEncrypted)\n print \"Success-Password added\"\n else:\n print \"Error:0002-No matching tag found.\"\n sys.exit(2)\n else:\n print \"Error:0003-Missing file \", GlobalKeyVaultFile\n sys.exit(2)", "def wallet(mnemonics, terra):\n m = mnemonics[0][\"mnemonic\"]\n return terra.wallet(MnemonicKey(m))", "def disable_pki(client, mount_point=\"pki\"):\n client.sys.disable_secrets_engine(mount_point)", "def configure_enable_aes_encryption(device, master_key):\n dialog = Dialog(\n [\n Statement(\n pattern=r\".*New\\s*key.*\",\n action=f\"sendline({master_key})\",\n loop_continue=True,\n continue_timer=False,\n ),\n Statement(\n pattern=r\".*Confirm\\s*key.*\",\n action=f\"sendline({master_key})\",\n loop_continue=True,\n continue_timer=False,\n )\n ]\n )\n try:\n device.configure(\"key config-key password-encrypt\", reply=dialog)\n device.configure(\"password encryption aes\")\n except SubCommandFailure as e:\n raise SubCommandFailure(\n \"Could not enables aes password encryption on device {device}.\\nError:\"\n \" {e}\".format(device=device.name, e=str(e))\n )", "def setKey(self, time, attributeIndex, hash, value, view) -> None:\n ...", "def DeriveNextKey(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def update_tsigkey(self, context, tsigkey):\n tsigkey_m = self._get_tsigkey(tsigkey['id'])\n\n # Store a copy of the original name..\n original_name = tsigkey_m.name\n\n tsigkey_m.update({\n 'name': tsigkey['name'],\n 'algorithm': tsigkey['algorithm'],\n 'secret': base64.b64encode(tsigkey['secret'])\n })\n\n tsigkey_m.save(self.session)\n\n # If the name changed, Update the necessary DomainMetadata records\n if original_name != tsigkey['name']:\n self.session.query(models.DomainMetadata)\\\n .filter_by(kind='TSIG-ALLOW-AXFR', content=original_name)\\\n .update(content=tsigkey['name'])", "def generate_key():\n key = ''.join([chr(random.randint(0, 0x10)) for _ in range(block_size)])\n return AES.new(second_key, 
AES.MODE_ECB).encrypt(pad((key.encode('ascii')), block_size))", "def write_key():\n key = fernet.Fernet.generate_key()\n keyfile = open(KEY_PATH,'wb')\n keyfile.write(key)\n keyfile.close()", "def generate_key():\n key = Fernet.generate_key()\n with open(\"Secret.key\",\"wb\")as key_file:\n key_file.write(key)", "def configure_disable_config_key_encryption(device):\n dialog = Dialog(\n [\n Statement(\n pattern=r\".*Continue\\s*with\\s*master\\s*key\\s*deletion.*\",\n action=\"sendline(yes)\",\n loop_continue=True,\n continue_timer=False,\n )\n ]\n )\n try:\n device.configure(\"no key config-key password-encrypt\", reply=dialog)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n \"Could not remove config-key password encryption on {device}.\\nError: {e}\".format(\n device=device.name, e=str(e))\n )", "def get_key(self):\r\n return self.__encryption_key", "def ecdsaPrivkey(self):\n return SigningKey.from_string(\n string=self.rawPrivkey(), curve=SECP256k1)", "def api_key( self, trans, user_id, **kwd ):\n user = self.get_user( trans, user_id )\n key = self.create_api_key( trans, user )\n return key", "def snapshot_encryption_key(self) -> 'outputs.CustomerEncryptionKeyResponse':\n return pulumi.get(self, \"snapshot_encryption_key\")", "def GetVoucherManagerKeyForIndex(idx):\n return unsigned(kern.globals.iv_global_table[idx].ivgte_key)", "def configure_disable_aes_encryption(device):\n dialog = Dialog(\n [\n Statement(\n pattern=r\".*Continue\\s*with\\s*master\\s*key\\s*deletion.*\",\n action=\"sendline(yes)\",\n loop_continue=True,\n continue_timer=False,\n )\n ]\n )\n try:\n device.configure(\"no key config-key password-encrypt\", reply=dialog)\n device.configure(\"no password encryption aes\")\n except SubCommandFailure as e:\n raise SubCommandFailure(\n \"Could not remove aes password encryption on {device}.\\nError: {e}\".format(\n device=device.name, e=str(e))\n )", "def ed25519_private_key(ctx):\n\n key = ed25519.Ed25519PrivateKey.generate()\n\n ctx.data = str(\n key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.PKCS8,\n encryption_algorithm=serialization.NoEncryption(),\n ),\n \"utf-8\",\n )", "def addkey(unsafe_import_key):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if not unlock_wallet(stm):\n return\n if not unsafe_import_key:\n unsafe_import_key = click.prompt(\"Enter private key\", confirmation_prompt=False, hide_input=True)\n mph.wallet.addPrivateKey(unsafe_import_key)\n set_shared_morphene_instance(stm)", "def test_disabled(self, monkeypatch):\n monkeypatch.setenv('ENABLE_AUTO_EXPIRE', 'true')\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('user2', 'ldasfkk', 'Inactive', created, last_used)\n key.audit(10, 11, 10, 8)\n assert key.audit_state == 'disabled'\n key.audit(60, 80, 30, 20)\n assert key.audit_state == 'disabled'", "def forge_public_key(value) -> bytes:\n prefix = value[:4]\n res = base58.b58decode_check(value)[4:]\n\n if prefix == 'edpk':\n return b'\\x00' + res\n elif prefix == 'sppk':\n return b'\\x01' + res\n elif prefix == 'p2pk':\n return b'\\x02' + res\n\n raise ValueError(f'Unrecognized key type: #{prefix}')", "def private_key(self, seed: str) -> str:\n return nanopy.deterministic_key(seed, self.account_index)[0]", "def database_encryption(self) -> 'outputs.DatabaseEncryptionResponse':\n return pulumi.get(self, \"database_encryption\")", "def 
reveal_seed():\n password = getpass.getpass('Password from keystore: ') # Prompt the user for a password of keystore file\n\n configuration = Configuration().load_configuration()\n api = get_api()\n\n try:\n wallet = api.get_private_key(configuration, password)\n click.echo('Account prv key: %s' % str(wallet.get_private_key().hex()))\n\n except InvalidPasswordException:\n click.echo('Incorrect password!')", "def encryption_key(self) -> bytearray:\n # Handle if encryption is disabled.\n if self.aes_on == 0:\n return None\n # Encryption is enabled so read the key and return it.\n key = bytearray(16)\n self._read_into(_REG_AES_KEY1, key)\n return key", "def key_encryption_key(self) -> Optional[pulumi.Input['KeyVaultAndKeyReferenceArgs']]:\n return pulumi.get(self, \"key_encryption_key\")", "def set_encryption(key):\n global_scope['enc'] = Encryption(key.encode())", "def encryption_oracle(pt):\n\n key = rand_bytes(16)\n iv = rand_bytes(16) # In case the mode is CBC. Generate this before\n # choosing the mode to protect against timing attacks.\n padded_pt = rand_bytes_range(5, 10) + pt + rand_bytes_range(5, 10)\n if random.randint(0, 1) == 0:\n # print True # Uncomment to check the oracle detector\n return aes_ecb_encrypt(key, padded_pt)\n else:\n # print False # Uncomment to check the oracle detector\n return aes_cbc_encrypt(key, padded_pt, iv)", "def generate_symmetric_key():\n return Fernet.generate_key()", "def get_otp(self, key):\n packed = self.pack()\n obj = AES.new(key, AES.MODE_ECB)\n ciphertext = obj.encrypt(packed)\n return ciphertext", "def generate(self):\n if self.curvetype == KeyType.ECDSA_P256v1:\n self.private_key_obj = ec.generate_private_key(ec.SECP256R1(), default_backend())\n elif self.curvetype == KeyType.ECDSA_SECP256k1:\n self.private_key_obj = ec.generate_private_key(ec.SECP256K1(), default_backend())\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_private_key_bytes()\n self._get_naive_public_key_bytes()", "def AddPrivateKeyFlag(parser, required=False):\n help_text = \"\"\"\\\n Unencrypted PKCS#1 or PKCS#8 PEM-encoded private key associated with\n the Client Certificate. 
Database Migration Service encrypts the value when\n storing it.\n \"\"\"\n parser.add_argument('--private-key', help=help_text, required=required)", "def set_key(self, key, transpose=False):\n if transpose:\n raise NotImplementedError('transpose not implemented')\n\n self._menu_select('Edit->Key Signature')\n\n key_dialog = self._app.window(class_name='TKEY')\n key_dialog.wait('ready')\n key_dialog.TComboBox1.select(key)\n key_dialog.TRadioButton4.click() # No Transpose\n key_dialog.TButton3.click() # OK\n self.wait_ready()", "def test_ec_no(self):\n key = c.KEY_EC\n usage = [\n c.KU_KEYENCIPHERMENT,\n c.KU_DATAENCIPHERMENT,\n ]\n self.assertFalse(utils.check_key_usage(key, usage))", "def _get_encryption_key(self, **options):\n\n return self._public_key", "def test_from_alt_text(self):\n rkeyring = dns.tsigkeyring.from_text(alt_text_keyring)\n self.assertEqual(rkeyring, rich_keyring)", "def deactive_key(iam_username):\n\n try:\n previous_secret_value = secretmanager.get_secret_value(\n SecretId=iam_username, VersionStage=\"AWSPREVIOUS\"\n )\n previous_secret_data = json.loads(previous_secret_value[\"SecretString\"])\n previous_access_key = previous_secret_data[\"AccessKey\"]\n\n \n print(\n f\"deactivating access key {previous_access_key} \"\n f\"for IAM user {iam_username}\"\n )\n\n iam.update_access_key(\n AccessKeyId=previous_access_key, Status=\"Inactive\", UserName=iam_username\n )\n\n emailmsg = f\"Hello,\\n\\n\" f\"The previous access key {previous_access_key}\"\n\n emailmsg = (\n f\"{emailmsg} has been disabled for {iam_username}.\\n\\n\"\n f\"This key will be deleted in the next 14 days. \"\n f\"If your application has lost access, be sure to update the \"\n f\"access key.\\n You can find the new key by looking up the secret \"\n f'\"{iam_username}\" under secrets manager via AWS Console '\n f\"in {AWS_REGION_NAME}.\\n\\n\"\n )\n\n sns.publish(\n TopicArn=SNS_TOPIC_ARN,\n Message=emailmsg,\n Subject=\"AWS Access Key Rotation: Previous key deactivated for \"\n f\"{iam_username}\",\n )\n print(\"Access Key has been deacivated\")\n return {\"status\": 200}\n except ClientError as e:\n print(e)\n return {\"status\": 500}", "def _get_decryption_key(self, **options):\n\n return self._private_key", "def test_rekey_non_encrypted(self):\n with pytest.raises(EncryptionError):\n rekey('message', old_key=b'0' * 32, new_key=b'1' * 32)", "def _configEncryptedPass():\n try:\n utils.configEncryptedPass(controller.CONF[\"ENCRYPTED_DB_PASS\"])\n except:\n logging.error(\"ERROR Editing engine local configuration file.\")\n logging.error(traceback.format_exc())\n raise Exception(output_messages.ERR_EXP_FAILED_CONFIG_ENGINE)", "def Elevate(self):\n self.Send(self.EncryptString('elevate\\n'))\n print self.DecryptString(self.Recv(4096))\n\n self.Send(self.EncryptString(self.flag_2))\n print self.DecryptString(self.Recv(4096))\n\n self.Send(self.EncryptString('RocketDonkey\\n'))\n print self.DecryptString(self.Recv(4096))", "def test_secretbox_enc_dec(test_data, minion_opts):\n # Store the data\n with patch(\"salt.runners.nacl.__opts__\", minion_opts, create=True):\n ret = nacl.keygen()\n assert \"pk\" in ret\n assert \"sk\" in ret\n pk = ret[\"pk\"]\n sk = ret[\"sk\"]\n\n # Encrypt with pk\n encrypted_data = nacl.secretbox_encrypt(\n data=test_data,\n sk=sk,\n )\n\n # Decrypt with sk\n ret = nacl.secretbox_decrypt(\n data=encrypted_data,\n sk=sk,\n )\n assert test_data == ret", "def set_tokenterminal_key(\n key: str, persist: bool = False, show_output: bool = False\n) -> str:\n 
handle_credential(\"API_TOKEN_TERMINAL_KEY\", key, persist)\n return check_tokenterminal_key(show_output)", "def encryption_key(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"encryption_key\")", "def setPassphrase( self , passphrase ):\n\t\tself.passphrase\t= passphrase\n\t\t\n\t\t# Generate and log the generated PMK.\n\t\tself.PMK = pbkdf2_bin( self.passphrase , self.ssid , 4096 , 32 )\n\t\tself.logger.logKey( 'Pairwise Master Key' , self.PMK )", "def dh_get_key():\n G = EcGroup()\n priv_dec = G.order().random()\n pub_enc = priv_dec * G.generator()\n return (G, priv_dec, pub_enc)", "def configure_service_password_encryption(device):\n\n try:\n device.configure(\"service password-encryption\")\n except SubCommandFailure:\n raise SubCommandFailure(\n \"Could not configure service password encryption\"\n )", "def encrypt(self, key, value):\n\n iv = ''.join(chr(random.randint(0, 0xFF)) for i in range(16))\n key = hashlib.sha256(key).digest()[:self.BLOCK_SIZE]\n cipher = AES.new(key, AES.MODE_CBC, iv)\n crypted = cipher.encrypt(self.pkcs5_pad(value))\n return iv+crypted", "def setup_service_key():\n if get_var('AFS_AKIMPERSONATE'):\n keytab = get_var('KRB_AFS_KEYTAB')\n if keytab and not os.path.exists(keytab):\n cell = get_var('AFS_CELL')\n realm = get_var('KRB_REALM')\n enctype = get_var('KRB_AFS_ENCTYPE')\n _KeytabKeywords().create_service_keytab(keytab, cell, realm, enctype, akimpersonate=True)\n if get_var('AFS_KEY_FILE') == 'KeyFile':\n run_keyword(\"Create Key File\")\n elif get_var('AFS_KEY_FILE') == 'rxkad.keytab':\n run_keyword(\"Install rxkad-k5 Keytab\")\n elif get_var('AFS_KEY_FILE') == 'KeyFileExt':\n run_keyword(\"Create Extended Key File\", get_var('KRB_AFS_ENCTYPE'))\n else:\n raise AssertionError(\"Unsupported AFS_KEY_FILE! %s\" % (get_var('AFS_KEY_FILE')))", "def test_rekey_defaults(self, settings):\n old_key = b'0' * 32\n new_key = b'1' * 32\n\n settings.CHITON_ENCRYPTION_KEY = new_key\n settings.CHITON_PREVIOUS_ENCRYPTION_KEY = old_key\n\n encrypted = encrypt('message', key=old_key)\n rekeyed = rekey(encrypted)\n\n assert decrypt(rekeyed) == 'message'", "def get_key(self, user):\r\n from delicious_cake.models import ApiKey\r\n\r\n try:\r\n key = ApiKey.objects.get(user=user)\r\n except ApiKey.DoesNotExist:\r\n return False\r\n\r\n return key.key" ]
[ "0.6330976", "0.57954943", "0.576141", "0.5639361", "0.55775625", "0.55667526", "0.5476863", "0.5375267", "0.5147358", "0.5121024", "0.50405693", "0.5036546", "0.4966299", "0.495867", "0.49050412", "0.48923448", "0.4775781", "0.4725484", "0.47229552", "0.46846083", "0.46528774", "0.46332484", "0.46136352", "0.4613422", "0.45934555", "0.45817637", "0.45646065", "0.45619288", "0.45466655", "0.45338303", "0.45330852", "0.4522693", "0.45154083", "0.45072708", "0.45034996", "0.4496248", "0.448232", "0.44717562", "0.44692364", "0.4468665", "0.44452128", "0.44334477", "0.44308007", "0.44272542", "0.442539", "0.4405445", "0.43906814", "0.43889225", "0.43859833", "0.43832663", "0.43821812", "0.43790212", "0.43786073", "0.43646887", "0.4363487", "0.43505985", "0.43288672", "0.43276793", "0.43231946", "0.43154556", "0.43135023", "0.4311885", "0.42977664", "0.4296338", "0.42928717", "0.42898366", "0.42857412", "0.42857075", "0.42847785", "0.42805973", "0.42759788", "0.42730865", "0.42679584", "0.42640486", "0.42525938", "0.42467585", "0.42450678", "0.42430392", "0.42420882", "0.4241899", "0.4236423", "0.42336628", "0.4232728", "0.42299035", "0.42271343", "0.42264795", "0.42238513", "0.420045", "0.41994599", "0.41936177", "0.41880974", "0.41846985", "0.41846618", "0.41835555", "0.41825074", "0.41812557", "0.41716644", "0.41706476", "0.41692173", "0.41679543" ]
0.55019593
6
You can use the custom key obtained by calling the DescribeUserEncryptionKeyList operation to enable TDE. For more information, see [ModifyDBInstanceTDE](~~131267~~).
def describe_user_encryption_key_list( self, request: dds_20151201_models.DescribeUserEncryptionKeyListRequest, ) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse: runtime = util_models.RuntimeOptions() return self.describe_user_encryption_key_list_with_options(request, runtime)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def modify_dbinstance_tdewith_options(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.encryption_key):\n query['EncryptionKey'] = request.encryption_key\n if not UtilClient.is_unset(request.encryptor_name):\n query['EncryptorName'] = request.encryptor_name\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_arn):\n query['RoleARN'] = request.role_arn\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.tdestatus):\n query['TDEStatus'] = request.tdestatus\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ModifyDBInstanceTDE',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ModifyDBInstanceTDEResponse(),\n self.call_api(params, req, runtime)\n )", "def describe_user_encryption_key_list_with_options(\n self,\n request: dds_20151201_models.DescribeUserEncryptionKeyListRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.target_region_id):\n query['TargetRegionId'] = request.target_region_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeUserEncryptionKeyList',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeUserEncryptionKeyListResponse(),\n self.call_api(params, req, runtime)\n )", "def describe_dbinstance_encryption_key_with_options(\n self,\n request: dds_20151201_models.DescribeDBInstanceEncryptionKeyRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = 
request.dbinstance_id\n if not UtilClient.is_unset(request.encryption_key):\n query['EncryptionKey'] = request.encryption_key\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceEncryptionKey',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse(),\n self.call_api(params, req, runtime)\n )", "async def modify_dbinstance_tdewith_options_async(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.encryption_key):\n query['EncryptionKey'] = request.encryption_key\n if not UtilClient.is_unset(request.encryptor_name):\n query['EncryptorName'] = request.encryptor_name\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_arn):\n query['RoleARN'] = request.role_arn\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.tdestatus):\n query['TDEStatus'] = request.tdestatus\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ModifyDBInstanceTDE',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ModifyDBInstanceTDEResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def modify_dbinstance_tde(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n runtime = util_models.RuntimeOptions()\n return self.modify_dbinstance_tdewith_options(request, runtime)", "def create_tsigkey(self, context, tsigkey):\n\n if tsigkey['algorithm'] not in TSIG_SUPPORTED_ALGORITHMS:\n raise exceptions.NotImplemented('Unsupported algorithm')\n\n tsigkey_m = models.TsigKey()\n\n tsigkey_m.update({\n 'designate_id': tsigkey['id'],\n 'name': tsigkey['name'],\n 'algorithm': tsigkey['algorithm'],\n 'secret': base64.b64encode(tsigkey['secret'])\n })\n\n tsigkey_m.save(self.session)\n\n # NOTE(kiall): Prepare and execute query to 
install this TSIG Key on\n # every domain. We use a manual query here since anything\n # else would be impossibly slow.\n query_select = select([\n models.Domain.__table__.c.id,\n \"'TSIG-ALLOW-AXFR'\",\n \"'%s'\" % tsigkey['name']]\n )\n\n columns = [\n models.DomainMetadata.__table__.c.domain_id,\n models.DomainMetadata.__table__.c.kind,\n models.DomainMetadata.__table__.c.content,\n ]\n\n query = InsertFromSelect(models.DomainMetadata.__table__, query_select,\n columns)\n\n # NOTE(kiall): A TX is required for, at the least, SQLite.\n self.session.begin()\n self.session.execute(query)\n self.session.commit()", "async def describe_user_encryption_key_list_with_options_async(\n self,\n request: dds_20151201_models.DescribeUserEncryptionKeyListRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.target_region_id):\n query['TargetRegionId'] = request.target_region_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeUserEncryptionKeyList',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeUserEncryptionKeyListResponse(),\n await self.call_api_async(params, req, runtime)\n )", "async def describe_dbinstance_encryption_key_with_options_async(\n self,\n request: dds_20151201_models.DescribeDBInstanceEncryptionKeyRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.encryption_key):\n query['EncryptionKey'] = request.encryption_key\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceEncryptionKey',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n 
dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def describe_dbinstance_encryption_key(\n self,\n request: dds_20151201_models.DescribeDBInstanceEncryptionKeyRequest,\n ) -> dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_dbinstance_encryption_key_with_options(request, runtime)", "def CreateFromExtendedKey(self,\n wallet_name: str,\n ex_key_str: str) -> HdWalletBase:\n try:\n bip_obj = Bip32Secp256k1.FromExtendedKey(ex_key_str)\n except Bip32KeyError as ex:\n raise ValueError(f\"Invalid extended key: {ex_key_str}\") from ex\n\n # Segwit wallet uses hardened derivation, not supported by public-only objects\n if bip_obj.IsPublicOnly() and self.m_mnemonic_type == HdWalletElectrumV2MnemonicTypes.SEGWIT:\n raise ValueError(\"Only private extended keys are supported for segwit mnemonic type\")\n\n electrum_obj = self.m_electrum_cls(bip_obj)\n return HdWalletElectrumV2(wallet_name=wallet_name,\n electrum_obj=electrum_obj)", "async def modify_dbinstance_tde_async(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n runtime = util_models.RuntimeOptions()\n return await self.modify_dbinstance_tdewith_options_async(request, runtime)", "async def describe_dbinstance_encryption_key_async(\n self,\n request: dds_20151201_models.DescribeDBInstanceEncryptionKeyRequest,\n ) -> dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse:\n runtime = util_models.RuntimeOptions()\n return await self.describe_dbinstance_encryption_key_with_options_async(request, runtime)", "def _derive_key_iv(nonce, user_key, settings):\n if settings.ticketCipher == \"aes128gcm\":\n prf_name = \"sha256\"\n prf_size = 32\n else:\n prf_name = \"sha384\"\n prf_size = 48\n\n # mix the nonce with the key set by user\n secret = bytearray(prf_size)\n secret = secureHMAC(secret, nonce, prf_name)\n secret = derive_secret(secret, bytearray(b'derived'), None, prf_name)\n secret = secureHMAC(secret, user_key, prf_name)\n\n ticket_secret = derive_secret(secret,\n bytearray(b'SessionTicket secret'),\n None, prf_name)\n\n key = HKDF_expand_label(ticket_secret, b\"key\", b\"\", len(user_key),\n prf_name)\n # all AEADs use 12 byte long IV\n iv = HKDF_expand_label(ticket_secret, b\"iv\", b\"\", 12, prf_name)\n return key, iv", "def _derive_key(\n self, passphrase: str, otp: YubikeyOTP, *args : bytes\n ) -> bytes:\n return self._context_kdf.derive(\n combine_keys(\n passphrase.encode('utf-8'),\n otp.token.private_uid,\n *args\n )\n )", "def _get_encryption_key(self, **options):\n\n raise CoreNotImplementedError()", "def describe_dbinstance_tdeinfo_with_options(\n self,\n request: dds_20151201_models.DescribeDBInstanceTDEInfoRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceTDEInfoResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not 
UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceTDEInfo',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstanceTDEInfoResponse(),\n self.call_api(params, req, runtime)\n )", "async def describe_user_encryption_key_list_async(\n self,\n request: dds_20151201_models.DescribeUserEncryptionKeyListRequest,\n ) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse:\n runtime = util_models.RuntimeOptions()\n return await self.describe_user_encryption_key_list_with_options_async(request, runtime)", "def _get_sql_db_tde_disabled_event(com, ext):\n friendly_cloud_type = util.friendly_string(com.get('cloud_type'))\n reference = com.get('reference')\n description = (\n '{} SQL DB {} has TDE disabled.'\n .format(friendly_cloud_type, reference)\n )\n recommendation = (\n 'Check {} SQL DB {} and enable TDE.'\n .format(friendly_cloud_type, reference)\n )\n event_record = {\n # Preserve the extended properties from the virtual\n # machine record because they provide useful context to\n # locate the virtual machine that led to the event.\n 'ext': util.merge_dicts(ext, {\n 'record_type': 'sql_db_tde_event'\n }),\n 'com': {\n 'cloud_type': com.get('cloud_type'),\n 'record_type': 'sql_db_tde_event',\n 'reference': reference,\n 'description': description,\n 'recommendation': recommendation,\n }\n }\n\n _log.info('Generating sql_db_tde_event; %r', event_record)\n yield event_record", "def add(ctx: CLIContext, user_id, resource_policy, admin, inactive, rate_limit):\n with Session() as session:\n try:\n data = session.KeyPair.create(\n user_id,\n is_active=not inactive,\n is_admin=admin,\n resource_policy=resource_policy,\n rate_limit=rate_limit)\n except Exception as e:\n ctx.output.print_mutation_error(\n e,\n item_name='keypair',\n action_name='add',\n )\n sys.exit(1)\n if not data['ok']:\n ctx.output.print_mutation_error(\n msg=data['msg'],\n item_name='keypair',\n action_name='add',\n )\n sys.exit(1)\n ctx.output.print_mutation_result(\n data,\n item_name='keypair',\n extra_info={\n 'access_key': data['keypair']['access_key'],\n 'secret_key': data['keypair']['secret_key'],\n },\n )", "def save_symmetric_key(self, key, user):\n self.temp_passphrase = key\n self.send_request(user, self.KM_TEMP_KEY_ACK)", "def keygen():\n pk, pub = generate_signing_key()\n t = PrettyTable([\"Private (install on your witness node)\",\n \"Public (publish with 'conductor enable' command)\"])\n t.align = \"l\"\n t.add_row([pk, pub])\n\n output(t, '')", "def DeriveKey(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def walletinfo(test_unlock):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect() \n t = PrettyTable([\"Key\", \"Value\"])\n t.align = \"l\"\n t.add_row([\"created\", mph.wallet.created()])\n t.add_row([\"locked\", mph.wallet.locked()])\n t.add_row([\"Number of stored keys\", len(mph.wallet.getPublicKeys())])\n t.add_row([\"sql-file\", mph.wallet.keyStorage.sqlDataBaseFile])\n password_storage = mph.config[\"password_storage\"]\n t.add_row([\"password_storage\", password_storage])\n password = 
os.environ.get(\"UNLOCK\")\n if password is not None:\n t.add_row([\"UNLOCK env set\", \"yes\"])\n else:\n t.add_row([\"UNLOCK env set\", \"no\"])\n if KEYRING_AVAILABLE:\n t.add_row([\"keyring installed\", \"yes\"])\n else:\n t.add_row([\"keyring installed\", \"no\"])\n if test_unlock:\n if unlock_wallet(stm):\n t.add_row([\"Wallet unlock\", \"successful\"])\n else:\n t.add_row([\"Wallet unlock\", \"not working\"])\n # t.add_row([\"getPublicKeys\", str(mph.wallet.getPublicKeys())])\n print(t)", "def _get_decryption_key(self, **options):\n\n raise CoreNotImplementedError()", "def test_create_digital_access_key(self):\n pass", "def _create_fernet_key(self) -> str:\n\n client = boto3.client(\"ssm\", endpoint_url=os.environ.get(\"AWS_ENDPOINT\"))\n\n try:\n response = client.get_parameter(Name=self.object_name, WithDecryption=True)\n return response[\"Parameter\"][\"Value\"]\n except client.exceptions.ParameterNotFound:\n return Fernet.generate_key().decode()", "def crypto_test(tdesc, tpm):\n node_name = tdesc.get('name')\n key = get_attribute(tdesc, 'key')\n if len(key) not in (16, 24, 32):\n raise subcmd.TpmTestError('wrong key size \"%s:%s\"' % (\n node_name,\n ''.join('%2.2x' % ord(x) for x in key)))\n iv = get_attribute(tdesc, 'iv', required=False)\n if iv and len(iv) != 16:\n raise subcmd.TpmTestError('wrong iv size \"%s:%s\"' % (\n node_name,\n ''.join('%2.2x' % ord(x) for x in iv)))\n clear_text = get_attribute(tdesc, 'clear_text')\n if tpm.debug_enabled():\n print('clear text size', len(clear_text))\n cipher_text = get_attribute(tdesc, 'cipher_text', required=False)\n real_cipher_text = crypto_run(node_name, ENCRYPT, key, iv,\n clear_text, cipher_text, tpm)\n crypto_run(node_name, DECRYPT, key, iv, real_cipher_text,\n clear_text, tpm)\n print(utils.cursor_back() + 'SUCCESS: %s' % node_name)", "def _encryptDBPass():\n #run encrypt tool on user given password\n controller.CONF[\"ENCRYPTED_DB_PASS\"] = utils.encryptEngineDBPass(password=controller.CONF[\"DB_PASS\"],\n maskList=masked_value_set)", "def create_key ():", "def createTripleDES(key, IV, implList=None):\r\n if implList == None:\r\n implList = [\"openssl\", \"pycrypto\"]\r\n\r\n for impl in implList:\r\n if impl == \"openssl\" and cryptomath.m2cryptoLoaded:\r\n return openssl_tripledes.new(key, 2, IV)\r\n elif impl == \"pycrypto\" and cryptomath.pycryptoLoaded:\r\n return pycrypto_tripledes.new(key, 2, IV)\r\n raise NotImplementedError()", "def get_data_key(self, encryption_context=None):\n return self.kms_clients[0].generate_data_key(\n KeyId=self.master_key_id,\n KeySpec='AES_256',\n EncryptionContext=encryption_context)", "def CreateFromExtendedKey(self,\n wallet_name: str,\n exkey_str: str) -> HdWallet:\n\n # Create BIP object from extended key\n bip_obj = self.__GetBipClass().FromExtendedKey(exkey_str, self.m_coin_idx)\n\n # Create wallet\n return HdWallet(wallet_name=wallet_name,\n bip_obj=bip_obj)", "def encryption_key(self) -> typing.Optional[aws_cdk.aws_kms.IKey]:\n ...", "def sendKeyEventVirtualMachine(self,node,vmid, key):\n post_data = {'key': str(key)}\n data = self.connect('put',\"nodes/%s/qemu/%s/sendkey\" % (node,vmid), post_data)\n return data", "def gen_tlsauth_key():\n cmd = ['/usr/sbin/openvpn', '--genkey', 'secret', 'ta.tmp']\n ret = subprocess.check_call(cmd)\n with open('ta.tmp') as key:\n key = key.read()\n os.remove('ta.tmp')\n return key", "async def describe_dbinstance_tdeinfo_with_options_async(\n self,\n request: dds_20151201_models.DescribeDBInstanceTDEInfoRequest,\n runtime: 
util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceTDEInfoResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceTDEInfo',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstanceTDEInfoResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def disk_encryption_key(self) -> Optional[pulumi.Input['KeyVaultAndSecretReferenceArgs']]:\n return pulumi.get(self, \"disk_encryption_key\")", "def create_crypt_key():\n\n crypt_key = Fernet.generate_key() # key is type = bytes\n\n crypt_query = 'INSERT INTO Crypt (crypt_key) VALUES (%s)'\n my_cursor.execute(crypt_query, (crypt_key,))\n pw_db.commit()", "def generate_secret(self,\n passphrase: str, otpstring: str, key: bytes,\n **kwargs\n ):\n assert self._state is not None, 'Unseal the vault first'\n otp = YubikeyOTP.parse(otpstring, key)\n\n kdf_config = self._vault_kdf.settings.copy()\n kdf_config.update(**kwargs)\n\n assert otp.public_uid not in self._state, \\\n 'This YubiKey is already in use'\n self._state[otp.public_uid] = YKContext.init(\n key=key, passphrase=passphrase, otp=otp,\n **kdf_config\n )", "def encrypt_item(table_name, aws_cmk_id):\n index_key = {\"partition_attribute\": {\"S\": \"is this\"}, \"sort_attribute\": {\"N\": \"55\"}}\n plaintext_item = {\n \"example\": {\"S\": \"data\"},\n \"some numbers\": {\"N\": \"99\"},\n \"and some binary\": {\"B\": b\"\\x00\\x01\\x02\"},\n \"leave me\": {\"S\": \"alone\"}, # We want to ignore this attribute\n }\n # Collect all of the attributes that will be encrypted (used later).\n encrypted_attributes = set(plaintext_item.keys())\n encrypted_attributes.remove(\"leave me\")\n # Collect all of the attributes that will not be encrypted (used later).\n unencrypted_attributes = set(index_key.keys())\n unencrypted_attributes.add(\"leave me\")\n # Add the index pairs to the item.\n plaintext_item.update(index_key)\n\n # Create a normal client.\n client = boto3.client(\"dynamodb\")\n # Create a crypto materials provider using the specified AWS KMS key.\n aws_kms_cmp = AwsKmsCryptographicMaterialsProvider(key_id=aws_cmk_id)\n # Create attribute actions that tells the encrypted client to encrypt all attributes except one.\n actions = AttributeActions(\n default_action=CryptoAction.ENCRYPT_AND_SIGN, attribute_actions={\"leave me\": CryptoAction.DO_NOTHING}\n )\n # Use these objects to create an encrypted client.\n encrypted_client = EncryptedClient(client=client, materials_provider=aws_kms_cmp, attribute_actions=actions)\n\n # Put the item to the table, using the encrypted client to transparently encrypt it.\n 
encrypted_client.put_item(TableName=table_name, Item=plaintext_item)\n\n # Get the encrypted item using the standard client.\n encrypted_item = client.get_item(TableName=table_name, Key=index_key)[\"Item\"]\n\n # Get the item using the encrypted client, transparently decrypting it.\n decrypted_item = encrypted_client.get_item(TableName=table_name, Key=index_key)[\"Item\"]\n\n # Verify that all of the attributes are different in the encrypted item\n for name in encrypted_attributes:\n assert encrypted_item[name] != plaintext_item[name]\n assert decrypted_item[name] == plaintext_item[name]\n\n # Verify that all of the attributes that should not be encrypted were not.\n for name in unencrypted_attributes:\n assert decrypted_item[name] == encrypted_item[name] == plaintext_item[name]\n\n # Clean up the item\n encrypted_client.delete_item(TableName=table_name, Key=index_key)", "def _get_decryption_key(self, **options):\n\n return self._get_encryption_key(**options)", "def key_manager():\n key = DBKey(5, [], 2)\n key.receive_db_key()\n key.send_db_key()\n return key", "def generate_key(self):\n\n self.key = Fernet.generate_key()\n self.cryptor = Fernet(self.key)", "def private_key(self):", "def get_key(self, key_value):\n # Storing the correct key value back to the self.key attributes.\n self.key=key_value\n self.cryptor=Fernet(self.key)", "def TeXKey(self, default=None):\n return self.data.get('texkey', default)", "def describe_dbinstance_tdeinfo(\n self,\n request: dds_20151201_models.DescribeDBInstanceTDEInfoRequest,\n ) -> dds_20151201_models.DescribeDBInstanceTDEInfoResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_dbinstance_tdeinfo_with_options(request, runtime)", "def trustee_keygenerator(request, election, trustee):\n eg_params_json = utils.to_json(ELGAMAL_PARAMS_LD_OBJECT.toJSONDict())\n\n return render_template(request, \"election_keygenerator\", {'eg_params_json': eg_params_json, 'election': election, 'trustee': trustee})", "def devkey():\r\n\r\n keys = []\r\n with open('licence.txt','r') as keyfile:\r\n keys.append(keyfile.read())\r\n keys = keys[0].split('\\n')\r\n\r\n twt = Twitter(keys[0], keys[1], keys[2], keys[3])\r\n\r\n return(twt)", "def createSaltKey(operation,newPassword,newPasswordTag):\n \n newPasswordEncrypted=encrypt(GlobalSaltKeyValue,newPassword)\n \n if os.path.isfile(GlobalKeyVaultFile):\n if checkTag(GlobalKeyVaultFileSection,newPasswordTag):\n if operation == 'update':\n addUpdateTag(newPasswordTag, newPasswordEncrypted)\n print \"Success-Password updated\"\n else:\n print \"Error:0001-Section and password tag already exists.\"\n sys.exit(2)\n\n else:\n if operation == 'add': \n addUpdateTag(newPasswordTag, newPasswordEncrypted)\n print \"Success-Password added\"\n else:\n print \"Error:0002-No matching tag found.\"\n sys.exit(2)\n else:\n print \"Error:0003-Missing file \", GlobalKeyVaultFile\n sys.exit(2)", "def wallet(mnemonics, terra):\n m = mnemonics[0][\"mnemonic\"]\n return terra.wallet(MnemonicKey(m))", "def disable_pki(client, mount_point=\"pki\"):\n client.sys.disable_secrets_engine(mount_point)", "def configure_enable_aes_encryption(device, master_key):\n dialog = Dialog(\n [\n Statement(\n pattern=r\".*New\\s*key.*\",\n action=f\"sendline({master_key})\",\n loop_continue=True,\n continue_timer=False,\n ),\n Statement(\n pattern=r\".*Confirm\\s*key.*\",\n action=f\"sendline({master_key})\",\n loop_continue=True,\n continue_timer=False,\n )\n ]\n )\n try:\n device.configure(\"key config-key password-encrypt\", reply=dialog)\n 
device.configure(\"password encryption aes\")\n except SubCommandFailure as e:\n raise SubCommandFailure(\n \"Could not enables aes password encryption on device {device}.\\nError:\"\n \" {e}\".format(device=device.name, e=str(e))\n )", "def setKey(self, time, attributeIndex, hash, value, view) -> None:\n ...", "def DeriveNextKey(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def update_tsigkey(self, context, tsigkey):\n tsigkey_m = self._get_tsigkey(tsigkey['id'])\n\n # Store a copy of the original name..\n original_name = tsigkey_m.name\n\n tsigkey_m.update({\n 'name': tsigkey['name'],\n 'algorithm': tsigkey['algorithm'],\n 'secret': base64.b64encode(tsigkey['secret'])\n })\n\n tsigkey_m.save(self.session)\n\n # If the name changed, Update the necessary DomainMetadata records\n if original_name != tsigkey['name']:\n self.session.query(models.DomainMetadata)\\\n .filter_by(kind='TSIG-ALLOW-AXFR', content=original_name)\\\n .update(content=tsigkey['name'])", "def generate_key():\n key = ''.join([chr(random.randint(0, 0x10)) for _ in range(block_size)])\n return AES.new(second_key, AES.MODE_ECB).encrypt(pad((key.encode('ascii')), block_size))", "def write_key():\n key = fernet.Fernet.generate_key()\n keyfile = open(KEY_PATH,'wb')\n keyfile.write(key)\n keyfile.close()", "def generate_key():\n key = Fernet.generate_key()\n with open(\"Secret.key\",\"wb\")as key_file:\n key_file.write(key)", "def configure_disable_config_key_encryption(device):\n dialog = Dialog(\n [\n Statement(\n pattern=r\".*Continue\\s*with\\s*master\\s*key\\s*deletion.*\",\n action=\"sendline(yes)\",\n loop_continue=True,\n continue_timer=False,\n )\n ]\n )\n try:\n device.configure(\"no key config-key password-encrypt\", reply=dialog)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n \"Could not remove config-key password encryption on {device}.\\nError: {e}\".format(\n device=device.name, e=str(e))\n )", "def get_key(self):\r\n return self.__encryption_key", "def ecdsaPrivkey(self):\n return SigningKey.from_string(\n string=self.rawPrivkey(), curve=SECP256k1)", "def api_key( self, trans, user_id, **kwd ):\n user = self.get_user( trans, user_id )\n key = self.create_api_key( trans, user )\n return key", "def snapshot_encryption_key(self) -> 'outputs.CustomerEncryptionKeyResponse':\n return pulumi.get(self, \"snapshot_encryption_key\")", "def GetVoucherManagerKeyForIndex(idx):\n return unsigned(kern.globals.iv_global_table[idx].ivgte_key)", "def configure_disable_aes_encryption(device):\n dialog = Dialog(\n [\n Statement(\n pattern=r\".*Continue\\s*with\\s*master\\s*key\\s*deletion.*\",\n action=\"sendline(yes)\",\n loop_continue=True,\n continue_timer=False,\n )\n ]\n )\n try:\n device.configure(\"no key config-key password-encrypt\", reply=dialog)\n device.configure(\"no password encryption aes\")\n except SubCommandFailure as e:\n raise SubCommandFailure(\n \"Could not remove aes password encryption on {device}.\\nError: {e}\".format(\n device=device.name, e=str(e))\n )", "def test_disabled(self, monkeypatch):\n monkeypatch.setenv('ENABLE_AUTO_EXPIRE', 'true')\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('user2', 'ldasfkk', 'Inactive', created, last_used)\n key.audit(10, 11, 10, 8)\n assert key.audit_state == 'disabled'\n key.audit(60, 80, 30, 20)\n assert 
key.audit_state == 'disabled'", "def ed25519_private_key(ctx):\n\n key = ed25519.Ed25519PrivateKey.generate()\n\n ctx.data = str(\n key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.PKCS8,\n encryption_algorithm=serialization.NoEncryption(),\n ),\n \"utf-8\",\n )", "def addkey(unsafe_import_key):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if not unlock_wallet(stm):\n return\n if not unsafe_import_key:\n unsafe_import_key = click.prompt(\"Enter private key\", confirmation_prompt=False, hide_input=True)\n mph.wallet.addPrivateKey(unsafe_import_key)\n set_shared_morphene_instance(stm)", "def forge_public_key(value) -> bytes:\n prefix = value[:4]\n res = base58.b58decode_check(value)[4:]\n\n if prefix == 'edpk':\n return b'\\x00' + res\n elif prefix == 'sppk':\n return b'\\x01' + res\n elif prefix == 'p2pk':\n return b'\\x02' + res\n\n raise ValueError(f'Unrecognized key type: #{prefix}')", "def private_key(self, seed: str) -> str:\n return nanopy.deterministic_key(seed, self.account_index)[0]", "def database_encryption(self) -> 'outputs.DatabaseEncryptionResponse':\n return pulumi.get(self, \"database_encryption\")", "def reveal_seed():\n password = getpass.getpass('Password from keystore: ') # Prompt the user for a password of keystore file\n\n configuration = Configuration().load_configuration()\n api = get_api()\n\n try:\n wallet = api.get_private_key(configuration, password)\n click.echo('Account prv key: %s' % str(wallet.get_private_key().hex()))\n\n except InvalidPasswordException:\n click.echo('Incorrect password!')", "def encryption_key(self) -> bytearray:\n # Handle if encryption is disabled.\n if self.aes_on == 0:\n return None\n # Encryption is enabled so read the key and return it.\n key = bytearray(16)\n self._read_into(_REG_AES_KEY1, key)\n return key", "def key_encryption_key(self) -> Optional[pulumi.Input['KeyVaultAndKeyReferenceArgs']]:\n return pulumi.get(self, \"key_encryption_key\")", "def set_encryption(key):\n global_scope['enc'] = Encryption(key.encode())", "def encryption_oracle(pt):\n\n key = rand_bytes(16)\n iv = rand_bytes(16) # In case the mode is CBC. Generate this before\n # choosing the mode to protect against timing attacks.\n padded_pt = rand_bytes_range(5, 10) + pt + rand_bytes_range(5, 10)\n if random.randint(0, 1) == 0:\n # print True # Uncomment to check the oracle detector\n return aes_ecb_encrypt(key, padded_pt)\n else:\n # print False # Uncomment to check the oracle detector\n return aes_cbc_encrypt(key, padded_pt, iv)", "def generate_symmetric_key():\n return Fernet.generate_key()", "def get_otp(self, key):\n packed = self.pack()\n obj = AES.new(key, AES.MODE_ECB)\n ciphertext = obj.encrypt(packed)\n return ciphertext", "def generate(self):\n if self.curvetype == KeyType.ECDSA_P256v1:\n self.private_key_obj = ec.generate_private_key(ec.SECP256R1(), default_backend())\n elif self.curvetype == KeyType.ECDSA_SECP256k1:\n self.private_key_obj = ec.generate_private_key(ec.SECP256K1(), default_backend())\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_private_key_bytes()\n self._get_naive_public_key_bytes()", "def AddPrivateKeyFlag(parser, required=False):\n help_text = \"\"\"\\\n Unencrypted PKCS#1 or PKCS#8 PEM-encoded private key associated with\n the Client Certificate. 
Database Migration Service encrypts the value when\n storing it.\n \"\"\"\n parser.add_argument('--private-key', help=help_text, required=required)", "def set_key(self, key, transpose=False):\n if transpose:\n raise NotImplementedError('transpose not implemented')\n\n self._menu_select('Edit->Key Signature')\n\n key_dialog = self._app.window(class_name='TKEY')\n key_dialog.wait('ready')\n key_dialog.TComboBox1.select(key)\n key_dialog.TRadioButton4.click() # No Transpose\n key_dialog.TButton3.click() # OK\n self.wait_ready()", "def test_ec_no(self):\n key = c.KEY_EC\n usage = [\n c.KU_KEYENCIPHERMENT,\n c.KU_DATAENCIPHERMENT,\n ]\n self.assertFalse(utils.check_key_usage(key, usage))", "def _get_encryption_key(self, **options):\n\n return self._public_key", "def test_from_alt_text(self):\n rkeyring = dns.tsigkeyring.from_text(alt_text_keyring)\n self.assertEqual(rkeyring, rich_keyring)", "def deactive_key(iam_username):\n\n try:\n previous_secret_value = secretmanager.get_secret_value(\n SecretId=iam_username, VersionStage=\"AWSPREVIOUS\"\n )\n previous_secret_data = json.loads(previous_secret_value[\"SecretString\"])\n previous_access_key = previous_secret_data[\"AccessKey\"]\n\n \n print(\n f\"deactivating access key {previous_access_key} \"\n f\"for IAM user {iam_username}\"\n )\n\n iam.update_access_key(\n AccessKeyId=previous_access_key, Status=\"Inactive\", UserName=iam_username\n )\n\n emailmsg = f\"Hello,\\n\\n\" f\"The previous access key {previous_access_key}\"\n\n emailmsg = (\n f\"{emailmsg} has been disabled for {iam_username}.\\n\\n\"\n f\"This key will be deleted in the next 14 days. \"\n f\"If your application has lost access, be sure to update the \"\n f\"access key.\\n You can find the new key by looking up the secret \"\n f'\"{iam_username}\" under secrets manager via AWS Console '\n f\"in {AWS_REGION_NAME}.\\n\\n\"\n )\n\n sns.publish(\n TopicArn=SNS_TOPIC_ARN,\n Message=emailmsg,\n Subject=\"AWS Access Key Rotation: Previous key deactivated for \"\n f\"{iam_username}\",\n )\n print(\"Access Key has been deacivated\")\n return {\"status\": 200}\n except ClientError as e:\n print(e)\n return {\"status\": 500}", "def _get_decryption_key(self, **options):\n\n return self._private_key", "def _configEncryptedPass():\n try:\n utils.configEncryptedPass(controller.CONF[\"ENCRYPTED_DB_PASS\"])\n except:\n logging.error(\"ERROR Editing engine local configuration file.\")\n logging.error(traceback.format_exc())\n raise Exception(output_messages.ERR_EXP_FAILED_CONFIG_ENGINE)", "def test_rekey_non_encrypted(self):\n with pytest.raises(EncryptionError):\n rekey('message', old_key=b'0' * 32, new_key=b'1' * 32)", "def Elevate(self):\n self.Send(self.EncryptString('elevate\\n'))\n print self.DecryptString(self.Recv(4096))\n\n self.Send(self.EncryptString(self.flag_2))\n print self.DecryptString(self.Recv(4096))\n\n self.Send(self.EncryptString('RocketDonkey\\n'))\n print self.DecryptString(self.Recv(4096))", "def test_secretbox_enc_dec(test_data, minion_opts):\n # Store the data\n with patch(\"salt.runners.nacl.__opts__\", minion_opts, create=True):\n ret = nacl.keygen()\n assert \"pk\" in ret\n assert \"sk\" in ret\n pk = ret[\"pk\"]\n sk = ret[\"sk\"]\n\n # Encrypt with pk\n encrypted_data = nacl.secretbox_encrypt(\n data=test_data,\n sk=sk,\n )\n\n # Decrypt with sk\n ret = nacl.secretbox_decrypt(\n data=encrypted_data,\n sk=sk,\n )\n assert test_data == ret", "def setPassphrase( self , passphrase ):\n\t\tself.passphrase\t= passphrase\n\t\t\n\t\t# Generate and log the generated 
PMK.\n\t\tself.PMK = pbkdf2_bin( self.passphrase , self.ssid , 4096 , 32 )\n\t\tself.logger.logKey( 'Pairwise Master Key' , self.PMK )", "def set_tokenterminal_key(\n key: str, persist: bool = False, show_output: bool = False\n) -> str:\n handle_credential(\"API_TOKEN_TERMINAL_KEY\", key, persist)\n return check_tokenterminal_key(show_output)", "def encryption_key(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"encryption_key\")", "def configure_service_password_encryption(device):\n\n try:\n device.configure(\"service password-encryption\")\n except SubCommandFailure:\n raise SubCommandFailure(\n \"Could not configure service password encryption\"\n )", "def dh_get_key():\n G = EcGroup()\n priv_dec = G.order().random()\n pub_enc = priv_dec * G.generator()\n return (G, priv_dec, pub_enc)", "def encrypt(self, key, value):\n\n iv = ''.join(chr(random.randint(0, 0xFF)) for i in range(16))\n key = hashlib.sha256(key).digest()[:self.BLOCK_SIZE]\n cipher = AES.new(key, AES.MODE_CBC, iv)\n crypted = cipher.encrypt(self.pkcs5_pad(value))\n return iv+crypted", "def setup_service_key():\n if get_var('AFS_AKIMPERSONATE'):\n keytab = get_var('KRB_AFS_KEYTAB')\n if keytab and not os.path.exists(keytab):\n cell = get_var('AFS_CELL')\n realm = get_var('KRB_REALM')\n enctype = get_var('KRB_AFS_ENCTYPE')\n _KeytabKeywords().create_service_keytab(keytab, cell, realm, enctype, akimpersonate=True)\n if get_var('AFS_KEY_FILE') == 'KeyFile':\n run_keyword(\"Create Key File\")\n elif get_var('AFS_KEY_FILE') == 'rxkad.keytab':\n run_keyword(\"Install rxkad-k5 Keytab\")\n elif get_var('AFS_KEY_FILE') == 'KeyFileExt':\n run_keyword(\"Create Extended Key File\", get_var('KRB_AFS_ENCTYPE'))\n else:\n raise AssertionError(\"Unsupported AFS_KEY_FILE! %s\" % (get_var('AFS_KEY_FILE')))", "def test_rekey_defaults(self, settings):\n old_key = b'0' * 32\n new_key = b'1' * 32\n\n settings.CHITON_ENCRYPTION_KEY = new_key\n settings.CHITON_PREVIOUS_ENCRYPTION_KEY = old_key\n\n encrypted = encrypt('message', key=old_key)\n rekeyed = rekey(encrypted)\n\n assert decrypt(rekeyed) == 'message'", "def get_key(self, user):\r\n from delicious_cake.models import ApiKey\r\n\r\n try:\r\n key = ApiKey.objects.get(user=user)\r\n except ApiKey.DoesNotExist:\r\n return False\r\n\r\n return key.key" ]
[ "0.6331765", "0.57961434", "0.5759311", "0.56402856", "0.55784595", "0.55656636", "0.5502756", "0.54750466", "0.5373099", "0.5144797", "0.5041896", "0.5034966", "0.49652913", "0.49571082", "0.49031255", "0.48929504", "0.47773162", "0.47277826", "0.47213864", "0.46845135", "0.46508694", "0.4631471", "0.4613264", "0.46118814", "0.45919737", "0.45791653", "0.45643374", "0.45619097", "0.45438543", "0.45341885", "0.45315954", "0.45201147", "0.45122862", "0.4506182", "0.45022428", "0.44969547", "0.44802934", "0.4469784", "0.44684833", "0.44677836", "0.44434616", "0.4431479", "0.44282323", "0.44245413", "0.4423096", "0.44040415", "0.43914306", "0.43884116", "0.43864217", "0.43814126", "0.43806857", "0.43795836", "0.43782127", "0.436287", "0.436207", "0.43507287", "0.43263394", "0.43254805", "0.43204176", "0.43151617", "0.4310474", "0.43086183", "0.42965767", "0.42931113", "0.4291386", "0.42904833", "0.42854962", "0.42842814", "0.42830905", "0.4277821", "0.42733228", "0.42719632", "0.42659682", "0.42619833", "0.42494163", "0.4245101", "0.42444152", "0.42407292", "0.42404377", "0.4239527", "0.42350894", "0.42322585", "0.42317134", "0.4227303", "0.42264822", "0.42255625", "0.42220634", "0.4199288", "0.41987932", "0.41940632", "0.418726", "0.41835597", "0.41829175", "0.4181796", "0.4181173", "0.41798377", "0.41703123", "0.4169756", "0.41675168", "0.41669336" ]
0.5122141
10
You can use the custom key obtained by calling the DescribeUserEncryptionKeyList operation to enable TDE. For more information, see [ModifyDBInstanceTDE](~~131267~~).
async def describe_user_encryption_key_list_async( self, request: dds_20151201_models.DescribeUserEncryptionKeyListRequest, ) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse: runtime = util_models.RuntimeOptions() return await self.describe_user_encryption_key_list_with_options_async(request, runtime)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def modify_dbinstance_tdewith_options(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.encryption_key):\n query['EncryptionKey'] = request.encryption_key\n if not UtilClient.is_unset(request.encryptor_name):\n query['EncryptorName'] = request.encryptor_name\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_arn):\n query['RoleARN'] = request.role_arn\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.tdestatus):\n query['TDEStatus'] = request.tdestatus\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ModifyDBInstanceTDE',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ModifyDBInstanceTDEResponse(),\n self.call_api(params, req, runtime)\n )", "def describe_user_encryption_key_list_with_options(\n self,\n request: dds_20151201_models.DescribeUserEncryptionKeyListRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.target_region_id):\n query['TargetRegionId'] = request.target_region_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeUserEncryptionKeyList',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeUserEncryptionKeyListResponse(),\n self.call_api(params, req, runtime)\n )", "def describe_dbinstance_encryption_key_with_options(\n self,\n request: dds_20151201_models.DescribeDBInstanceEncryptionKeyRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = 
request.dbinstance_id\n if not UtilClient.is_unset(request.encryption_key):\n query['EncryptionKey'] = request.encryption_key\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceEncryptionKey',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse(),\n self.call_api(params, req, runtime)\n )", "async def modify_dbinstance_tdewith_options_async(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.encryption_key):\n query['EncryptionKey'] = request.encryption_key\n if not UtilClient.is_unset(request.encryptor_name):\n query['EncryptorName'] = request.encryptor_name\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_arn):\n query['RoleARN'] = request.role_arn\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.tdestatus):\n query['TDEStatus'] = request.tdestatus\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ModifyDBInstanceTDE',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ModifyDBInstanceTDEResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def modify_dbinstance_tde(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n runtime = util_models.RuntimeOptions()\n return self.modify_dbinstance_tdewith_options(request, runtime)", "def create_tsigkey(self, context, tsigkey):\n\n if tsigkey['algorithm'] not in TSIG_SUPPORTED_ALGORITHMS:\n raise exceptions.NotImplemented('Unsupported algorithm')\n\n tsigkey_m = models.TsigKey()\n\n tsigkey_m.update({\n 'designate_id': tsigkey['id'],\n 'name': tsigkey['name'],\n 'algorithm': tsigkey['algorithm'],\n 'secret': base64.b64encode(tsigkey['secret'])\n })\n\n tsigkey_m.save(self.session)\n\n # NOTE(kiall): Prepare and execute query to 
install this TSIG Key on\n # every domain. We use a manual query here since anything\n # else would be impossibly slow.\n query_select = select([\n models.Domain.__table__.c.id,\n \"'TSIG-ALLOW-AXFR'\",\n \"'%s'\" % tsigkey['name']]\n )\n\n columns = [\n models.DomainMetadata.__table__.c.domain_id,\n models.DomainMetadata.__table__.c.kind,\n models.DomainMetadata.__table__.c.content,\n ]\n\n query = InsertFromSelect(models.DomainMetadata.__table__, query_select,\n columns)\n\n # NOTE(kiall): A TX is required for, at the least, SQLite.\n self.session.begin()\n self.session.execute(query)\n self.session.commit()", "async def describe_user_encryption_key_list_with_options_async(\n self,\n request: dds_20151201_models.DescribeUserEncryptionKeyListRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.target_region_id):\n query['TargetRegionId'] = request.target_region_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeUserEncryptionKeyList',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeUserEncryptionKeyListResponse(),\n await self.call_api_async(params, req, runtime)\n )", "async def describe_dbinstance_encryption_key_with_options_async(\n self,\n request: dds_20151201_models.DescribeDBInstanceEncryptionKeyRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.encryption_key):\n query['EncryptionKey'] = request.encryption_key\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceEncryptionKey',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n 
dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def describe_dbinstance_encryption_key(\n self,\n request: dds_20151201_models.DescribeDBInstanceEncryptionKeyRequest,\n ) -> dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_dbinstance_encryption_key_with_options(request, runtime)", "def CreateFromExtendedKey(self,\n wallet_name: str,\n ex_key_str: str) -> HdWalletBase:\n try:\n bip_obj = Bip32Secp256k1.FromExtendedKey(ex_key_str)\n except Bip32KeyError as ex:\n raise ValueError(f\"Invalid extended key: {ex_key_str}\") from ex\n\n # Segwit wallet uses hardened derivation, not supported by public-only objects\n if bip_obj.IsPublicOnly() and self.m_mnemonic_type == HdWalletElectrumV2MnemonicTypes.SEGWIT:\n raise ValueError(\"Only private extended keys are supported for segwit mnemonic type\")\n\n electrum_obj = self.m_electrum_cls(bip_obj)\n return HdWalletElectrumV2(wallet_name=wallet_name,\n electrum_obj=electrum_obj)", "def describe_user_encryption_key_list(\n self,\n request: dds_20151201_models.DescribeUserEncryptionKeyListRequest,\n ) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_user_encryption_key_list_with_options(request, runtime)", "async def modify_dbinstance_tde_async(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n runtime = util_models.RuntimeOptions()\n return await self.modify_dbinstance_tdewith_options_async(request, runtime)", "async def describe_dbinstance_encryption_key_async(\n self,\n request: dds_20151201_models.DescribeDBInstanceEncryptionKeyRequest,\n ) -> dds_20151201_models.DescribeDBInstanceEncryptionKeyResponse:\n runtime = util_models.RuntimeOptions()\n return await self.describe_dbinstance_encryption_key_with_options_async(request, runtime)", "def _derive_key_iv(nonce, user_key, settings):\n if settings.ticketCipher == \"aes128gcm\":\n prf_name = \"sha256\"\n prf_size = 32\n else:\n prf_name = \"sha384\"\n prf_size = 48\n\n # mix the nonce with the key set by user\n secret = bytearray(prf_size)\n secret = secureHMAC(secret, nonce, prf_name)\n secret = derive_secret(secret, bytearray(b'derived'), None, prf_name)\n secret = secureHMAC(secret, user_key, prf_name)\n\n ticket_secret = derive_secret(secret,\n bytearray(b'SessionTicket secret'),\n None, prf_name)\n\n key = HKDF_expand_label(ticket_secret, b\"key\", b\"\", len(user_key),\n prf_name)\n # all AEADs use 12 byte long IV\n iv = HKDF_expand_label(ticket_secret, b\"iv\", b\"\", 12, prf_name)\n return key, iv", "def _derive_key(\n self, passphrase: str, otp: YubikeyOTP, *args : bytes\n ) -> bytes:\n return self._context_kdf.derive(\n combine_keys(\n passphrase.encode('utf-8'),\n otp.token.private_uid,\n *args\n )\n )", "def _get_encryption_key(self, **options):\n\n raise CoreNotImplementedError()", "def describe_dbinstance_tdeinfo_with_options(\n self,\n request: dds_20151201_models.DescribeDBInstanceTDEInfoRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceTDEInfoResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n 
query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceTDEInfo',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstanceTDEInfoResponse(),\n self.call_api(params, req, runtime)\n )", "def _get_sql_db_tde_disabled_event(com, ext):\n friendly_cloud_type = util.friendly_string(com.get('cloud_type'))\n reference = com.get('reference')\n description = (\n '{} SQL DB {} has TDE disabled.'\n .format(friendly_cloud_type, reference)\n )\n recommendation = (\n 'Check {} SQL DB {} and enable TDE.'\n .format(friendly_cloud_type, reference)\n )\n event_record = {\n # Preserve the extended properties from the virtual\n # machine record because they provide useful context to\n # locate the virtual machine that led to the event.\n 'ext': util.merge_dicts(ext, {\n 'record_type': 'sql_db_tde_event'\n }),\n 'com': {\n 'cloud_type': com.get('cloud_type'),\n 'record_type': 'sql_db_tde_event',\n 'reference': reference,\n 'description': description,\n 'recommendation': recommendation,\n }\n }\n\n _log.info('Generating sql_db_tde_event; %r', event_record)\n yield event_record", "def add(ctx: CLIContext, user_id, resource_policy, admin, inactive, rate_limit):\n with Session() as session:\n try:\n data = session.KeyPair.create(\n user_id,\n is_active=not inactive,\n is_admin=admin,\n resource_policy=resource_policy,\n rate_limit=rate_limit)\n except Exception as e:\n ctx.output.print_mutation_error(\n e,\n item_name='keypair',\n action_name='add',\n )\n sys.exit(1)\n if not data['ok']:\n ctx.output.print_mutation_error(\n msg=data['msg'],\n item_name='keypair',\n action_name='add',\n )\n sys.exit(1)\n ctx.output.print_mutation_result(\n data,\n item_name='keypair',\n extra_info={\n 'access_key': data['keypair']['access_key'],\n 'secret_key': data['keypair']['secret_key'],\n },\n )", "def save_symmetric_key(self, key, user):\n self.temp_passphrase = key\n self.send_request(user, self.KM_TEMP_KEY_ACK)", "def keygen():\n pk, pub = generate_signing_key()\n t = PrettyTable([\"Private (install on your witness node)\",\n \"Public (publish with 'conductor enable' command)\"])\n t.align = \"l\"\n t.add_row([pk, pub])\n\n output(t, '')", "def DeriveKey(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def walletinfo(test_unlock):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect() \n t = PrettyTable([\"Key\", \"Value\"])\n t.align = \"l\"\n t.add_row([\"created\", mph.wallet.created()])\n t.add_row([\"locked\", mph.wallet.locked()])\n t.add_row([\"Number of stored keys\", len(mph.wallet.getPublicKeys())])\n t.add_row([\"sql-file\", mph.wallet.keyStorage.sqlDataBaseFile])\n password_storage = mph.config[\"password_storage\"]\n t.add_row([\"password_storage\", password_storage])\n password = os.environ.get(\"UNLOCK\")\n if password is 
not None:\n t.add_row([\"UNLOCK env set\", \"yes\"])\n else:\n t.add_row([\"UNLOCK env set\", \"no\"])\n if KEYRING_AVAILABLE:\n t.add_row([\"keyring installed\", \"yes\"])\n else:\n t.add_row([\"keyring installed\", \"no\"])\n if test_unlock:\n if unlock_wallet(stm):\n t.add_row([\"Wallet unlock\", \"successful\"])\n else:\n t.add_row([\"Wallet unlock\", \"not working\"])\n # t.add_row([\"getPublicKeys\", str(mph.wallet.getPublicKeys())])\n print(t)", "def _get_decryption_key(self, **options):\n\n raise CoreNotImplementedError()", "def test_create_digital_access_key(self):\n pass", "def _create_fernet_key(self) -> str:\n\n client = boto3.client(\"ssm\", endpoint_url=os.environ.get(\"AWS_ENDPOINT\"))\n\n try:\n response = client.get_parameter(Name=self.object_name, WithDecryption=True)\n return response[\"Parameter\"][\"Value\"]\n except client.exceptions.ParameterNotFound:\n return Fernet.generate_key().decode()", "def crypto_test(tdesc, tpm):\n node_name = tdesc.get('name')\n key = get_attribute(tdesc, 'key')\n if len(key) not in (16, 24, 32):\n raise subcmd.TpmTestError('wrong key size \"%s:%s\"' % (\n node_name,\n ''.join('%2.2x' % ord(x) for x in key)))\n iv = get_attribute(tdesc, 'iv', required=False)\n if iv and len(iv) != 16:\n raise subcmd.TpmTestError('wrong iv size \"%s:%s\"' % (\n node_name,\n ''.join('%2.2x' % ord(x) for x in iv)))\n clear_text = get_attribute(tdesc, 'clear_text')\n if tpm.debug_enabled():\n print('clear text size', len(clear_text))\n cipher_text = get_attribute(tdesc, 'cipher_text', required=False)\n real_cipher_text = crypto_run(node_name, ENCRYPT, key, iv,\n clear_text, cipher_text, tpm)\n crypto_run(node_name, DECRYPT, key, iv, real_cipher_text,\n clear_text, tpm)\n print(utils.cursor_back() + 'SUCCESS: %s' % node_name)", "def _encryptDBPass():\n #run encrypt tool on user given password\n controller.CONF[\"ENCRYPTED_DB_PASS\"] = utils.encryptEngineDBPass(password=controller.CONF[\"DB_PASS\"],\n maskList=masked_value_set)", "def create_key ():", "def createTripleDES(key, IV, implList=None):\r\n if implList == None:\r\n implList = [\"openssl\", \"pycrypto\"]\r\n\r\n for impl in implList:\r\n if impl == \"openssl\" and cryptomath.m2cryptoLoaded:\r\n return openssl_tripledes.new(key, 2, IV)\r\n elif impl == \"pycrypto\" and cryptomath.pycryptoLoaded:\r\n return pycrypto_tripledes.new(key, 2, IV)\r\n raise NotImplementedError()", "def get_data_key(self, encryption_context=None):\n return self.kms_clients[0].generate_data_key(\n KeyId=self.master_key_id,\n KeySpec='AES_256',\n EncryptionContext=encryption_context)", "def CreateFromExtendedKey(self,\n wallet_name: str,\n exkey_str: str) -> HdWallet:\n\n # Create BIP object from extended key\n bip_obj = self.__GetBipClass().FromExtendedKey(exkey_str, self.m_coin_idx)\n\n # Create wallet\n return HdWallet(wallet_name=wallet_name,\n bip_obj=bip_obj)", "def encryption_key(self) -> typing.Optional[aws_cdk.aws_kms.IKey]:\n ...", "def sendKeyEventVirtualMachine(self,node,vmid, key):\n post_data = {'key': str(key)}\n data = self.connect('put',\"nodes/%s/qemu/%s/sendkey\" % (node,vmid), post_data)\n return data", "def gen_tlsauth_key():\n cmd = ['/usr/sbin/openvpn', '--genkey', 'secret', 'ta.tmp']\n ret = subprocess.check_call(cmd)\n with open('ta.tmp') as key:\n key = key.read()\n os.remove('ta.tmp')\n return key", "async def describe_dbinstance_tdeinfo_with_options_async(\n self,\n request: dds_20151201_models.DescribeDBInstanceTDEInfoRequest,\n runtime: util_models.RuntimeOptions,\n ) -> 
dds_20151201_models.DescribeDBInstanceTDEInfoResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceTDEInfo',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstanceTDEInfoResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def disk_encryption_key(self) -> Optional[pulumi.Input['KeyVaultAndSecretReferenceArgs']]:\n return pulumi.get(self, \"disk_encryption_key\")", "def create_crypt_key():\n\n crypt_key = Fernet.generate_key() # key is type = bytes\n\n crypt_query = 'INSERT INTO Crypt (crypt_key) VALUES (%s)'\n my_cursor.execute(crypt_query, (crypt_key,))\n pw_db.commit()", "def generate_secret(self,\n passphrase: str, otpstring: str, key: bytes,\n **kwargs\n ):\n assert self._state is not None, 'Unseal the vault first'\n otp = YubikeyOTP.parse(otpstring, key)\n\n kdf_config = self._vault_kdf.settings.copy()\n kdf_config.update(**kwargs)\n\n assert otp.public_uid not in self._state, \\\n 'This YubiKey is already in use'\n self._state[otp.public_uid] = YKContext.init(\n key=key, passphrase=passphrase, otp=otp,\n **kdf_config\n )", "def encrypt_item(table_name, aws_cmk_id):\n index_key = {\"partition_attribute\": {\"S\": \"is this\"}, \"sort_attribute\": {\"N\": \"55\"}}\n plaintext_item = {\n \"example\": {\"S\": \"data\"},\n \"some numbers\": {\"N\": \"99\"},\n \"and some binary\": {\"B\": b\"\\x00\\x01\\x02\"},\n \"leave me\": {\"S\": \"alone\"}, # We want to ignore this attribute\n }\n # Collect all of the attributes that will be encrypted (used later).\n encrypted_attributes = set(plaintext_item.keys())\n encrypted_attributes.remove(\"leave me\")\n # Collect all of the attributes that will not be encrypted (used later).\n unencrypted_attributes = set(index_key.keys())\n unencrypted_attributes.add(\"leave me\")\n # Add the index pairs to the item.\n plaintext_item.update(index_key)\n\n # Create a normal client.\n client = boto3.client(\"dynamodb\")\n # Create a crypto materials provider using the specified AWS KMS key.\n aws_kms_cmp = AwsKmsCryptographicMaterialsProvider(key_id=aws_cmk_id)\n # Create attribute actions that tells the encrypted client to encrypt all attributes except one.\n actions = AttributeActions(\n default_action=CryptoAction.ENCRYPT_AND_SIGN, attribute_actions={\"leave me\": CryptoAction.DO_NOTHING}\n )\n # Use these objects to create an encrypted client.\n encrypted_client = EncryptedClient(client=client, materials_provider=aws_kms_cmp, attribute_actions=actions)\n\n # Put the item to the table, using the encrypted client to transparently encrypt it.\n encrypted_client.put_item(TableName=table_name, 
Item=plaintext_item)\n\n # Get the encrypted item using the standard client.\n encrypted_item = client.get_item(TableName=table_name, Key=index_key)[\"Item\"]\n\n # Get the item using the encrypted client, transparently decrypting it.\n decrypted_item = encrypted_client.get_item(TableName=table_name, Key=index_key)[\"Item\"]\n\n # Verify that all of the attributes are different in the encrypted item\n for name in encrypted_attributes:\n assert encrypted_item[name] != plaintext_item[name]\n assert decrypted_item[name] == plaintext_item[name]\n\n # Verify that all of the attributes that should not be encrypted were not.\n for name in unencrypted_attributes:\n assert decrypted_item[name] == encrypted_item[name] == plaintext_item[name]\n\n # Clean up the item\n encrypted_client.delete_item(TableName=table_name, Key=index_key)", "def _get_decryption_key(self, **options):\n\n return self._get_encryption_key(**options)", "def key_manager():\n key = DBKey(5, [], 2)\n key.receive_db_key()\n key.send_db_key()\n return key", "def generate_key(self):\n\n self.key = Fernet.generate_key()\n self.cryptor = Fernet(self.key)", "def private_key(self):", "def get_key(self, key_value):\n # Storing the correct key value back to the self.key attributes.\n self.key=key_value\n self.cryptor=Fernet(self.key)", "def TeXKey(self, default=None):\n return self.data.get('texkey', default)", "def describe_dbinstance_tdeinfo(\n self,\n request: dds_20151201_models.DescribeDBInstanceTDEInfoRequest,\n ) -> dds_20151201_models.DescribeDBInstanceTDEInfoResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_dbinstance_tdeinfo_with_options(request, runtime)", "def trustee_keygenerator(request, election, trustee):\n eg_params_json = utils.to_json(ELGAMAL_PARAMS_LD_OBJECT.toJSONDict())\n\n return render_template(request, \"election_keygenerator\", {'eg_params_json': eg_params_json, 'election': election, 'trustee': trustee})", "def devkey():\r\n\r\n keys = []\r\n with open('licence.txt','r') as keyfile:\r\n keys.append(keyfile.read())\r\n keys = keys[0].split('\\n')\r\n\r\n twt = Twitter(keys[0], keys[1], keys[2], keys[3])\r\n\r\n return(twt)", "def createSaltKey(operation,newPassword,newPasswordTag):\n \n newPasswordEncrypted=encrypt(GlobalSaltKeyValue,newPassword)\n \n if os.path.isfile(GlobalKeyVaultFile):\n if checkTag(GlobalKeyVaultFileSection,newPasswordTag):\n if operation == 'update':\n addUpdateTag(newPasswordTag, newPasswordEncrypted)\n print \"Success-Password updated\"\n else:\n print \"Error:0001-Section and password tag already exists.\"\n sys.exit(2)\n\n else:\n if operation == 'add': \n addUpdateTag(newPasswordTag, newPasswordEncrypted)\n print \"Success-Password added\"\n else:\n print \"Error:0002-No matching tag found.\"\n sys.exit(2)\n else:\n print \"Error:0003-Missing file \", GlobalKeyVaultFile\n sys.exit(2)", "def wallet(mnemonics, terra):\n m = mnemonics[0][\"mnemonic\"]\n return terra.wallet(MnemonicKey(m))", "def disable_pki(client, mount_point=\"pki\"):\n client.sys.disable_secrets_engine(mount_point)", "def configure_enable_aes_encryption(device, master_key):\n dialog = Dialog(\n [\n Statement(\n pattern=r\".*New\\s*key.*\",\n action=f\"sendline({master_key})\",\n loop_continue=True,\n continue_timer=False,\n ),\n Statement(\n pattern=r\".*Confirm\\s*key.*\",\n action=f\"sendline({master_key})\",\n loop_continue=True,\n continue_timer=False,\n )\n ]\n )\n try:\n device.configure(\"key config-key password-encrypt\", reply=dialog)\n device.configure(\"password encryption aes\")\n 
except SubCommandFailure as e:\n raise SubCommandFailure(\n \"Could not enables aes password encryption on device {device}.\\nError:\"\n \" {e}\".format(device=device.name, e=str(e))\n )", "def setKey(self, time, attributeIndex, hash, value, view) -> None:\n ...", "def DeriveNextKey(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def update_tsigkey(self, context, tsigkey):\n tsigkey_m = self._get_tsigkey(tsigkey['id'])\n\n # Store a copy of the original name..\n original_name = tsigkey_m.name\n\n tsigkey_m.update({\n 'name': tsigkey['name'],\n 'algorithm': tsigkey['algorithm'],\n 'secret': base64.b64encode(tsigkey['secret'])\n })\n\n tsigkey_m.save(self.session)\n\n # If the name changed, Update the necessary DomainMetadata records\n if original_name != tsigkey['name']:\n self.session.query(models.DomainMetadata)\\\n .filter_by(kind='TSIG-ALLOW-AXFR', content=original_name)\\\n .update(content=tsigkey['name'])", "def generate_key():\n key = ''.join([chr(random.randint(0, 0x10)) for _ in range(block_size)])\n return AES.new(second_key, AES.MODE_ECB).encrypt(pad((key.encode('ascii')), block_size))", "def write_key():\n key = fernet.Fernet.generate_key()\n keyfile = open(KEY_PATH,'wb')\n keyfile.write(key)\n keyfile.close()", "def generate_key():\n key = Fernet.generate_key()\n with open(\"Secret.key\",\"wb\")as key_file:\n key_file.write(key)", "def configure_disable_config_key_encryption(device):\n dialog = Dialog(\n [\n Statement(\n pattern=r\".*Continue\\s*with\\s*master\\s*key\\s*deletion.*\",\n action=\"sendline(yes)\",\n loop_continue=True,\n continue_timer=False,\n )\n ]\n )\n try:\n device.configure(\"no key config-key password-encrypt\", reply=dialog)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n \"Could not remove config-key password encryption on {device}.\\nError: {e}\".format(\n device=device.name, e=str(e))\n )", "def get_key(self):\r\n return self.__encryption_key", "def ecdsaPrivkey(self):\n return SigningKey.from_string(\n string=self.rawPrivkey(), curve=SECP256k1)", "def api_key( self, trans, user_id, **kwd ):\n user = self.get_user( trans, user_id )\n key = self.create_api_key( trans, user )\n return key", "def snapshot_encryption_key(self) -> 'outputs.CustomerEncryptionKeyResponse':\n return pulumi.get(self, \"snapshot_encryption_key\")", "def GetVoucherManagerKeyForIndex(idx):\n return unsigned(kern.globals.iv_global_table[idx].ivgte_key)", "def configure_disable_aes_encryption(device):\n dialog = Dialog(\n [\n Statement(\n pattern=r\".*Continue\\s*with\\s*master\\s*key\\s*deletion.*\",\n action=\"sendline(yes)\",\n loop_continue=True,\n continue_timer=False,\n )\n ]\n )\n try:\n device.configure(\"no key config-key password-encrypt\", reply=dialog)\n device.configure(\"no password encryption aes\")\n except SubCommandFailure as e:\n raise SubCommandFailure(\n \"Could not remove aes password encryption on {device}.\\nError: {e}\".format(\n device=device.name, e=str(e))\n )", "def test_disabled(self, monkeypatch):\n monkeypatch.setenv('ENABLE_AUTO_EXPIRE', 'true')\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('user2', 'ldasfkk', 'Inactive', created, last_used)\n key.audit(10, 11, 10, 8)\n assert key.audit_state == 'disabled'\n key.audit(60, 80, 30, 20)\n assert key.audit_state == 'disabled'", "def 
addkey(unsafe_import_key):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if not unlock_wallet(stm):\n return\n if not unsafe_import_key:\n unsafe_import_key = click.prompt(\"Enter private key\", confirmation_prompt=False, hide_input=True)\n mph.wallet.addPrivateKey(unsafe_import_key)\n set_shared_morphene_instance(stm)", "def ed25519_private_key(ctx):\n\n key = ed25519.Ed25519PrivateKey.generate()\n\n ctx.data = str(\n key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.PKCS8,\n encryption_algorithm=serialization.NoEncryption(),\n ),\n \"utf-8\",\n )", "def forge_public_key(value) -> bytes:\n prefix = value[:4]\n res = base58.b58decode_check(value)[4:]\n\n if prefix == 'edpk':\n return b'\\x00' + res\n elif prefix == 'sppk':\n return b'\\x01' + res\n elif prefix == 'p2pk':\n return b'\\x02' + res\n\n raise ValueError(f'Unrecognized key type: #{prefix}')", "def private_key(self, seed: str) -> str:\n return nanopy.deterministic_key(seed, self.account_index)[0]", "def database_encryption(self) -> 'outputs.DatabaseEncryptionResponse':\n return pulumi.get(self, \"database_encryption\")", "def reveal_seed():\n password = getpass.getpass('Password from keystore: ') # Prompt the user for a password of keystore file\n\n configuration = Configuration().load_configuration()\n api = get_api()\n\n try:\n wallet = api.get_private_key(configuration, password)\n click.echo('Account prv key: %s' % str(wallet.get_private_key().hex()))\n\n except InvalidPasswordException:\n click.echo('Incorrect password!')", "def encryption_key(self) -> bytearray:\n # Handle if encryption is disabled.\n if self.aes_on == 0:\n return None\n # Encryption is enabled so read the key and return it.\n key = bytearray(16)\n self._read_into(_REG_AES_KEY1, key)\n return key", "def key_encryption_key(self) -> Optional[pulumi.Input['KeyVaultAndKeyReferenceArgs']]:\n return pulumi.get(self, \"key_encryption_key\")", "def encryption_oracle(pt):\n\n key = rand_bytes(16)\n iv = rand_bytes(16) # In case the mode is CBC. Generate this before\n # choosing the mode to protect against timing attacks.\n padded_pt = rand_bytes_range(5, 10) + pt + rand_bytes_range(5, 10)\n if random.randint(0, 1) == 0:\n # print True # Uncomment to check the oracle detector\n return aes_ecb_encrypt(key, padded_pt)\n else:\n # print False # Uncomment to check the oracle detector\n return aes_cbc_encrypt(key, padded_pt, iv)", "def set_encryption(key):\n global_scope['enc'] = Encryption(key.encode())", "def generate_symmetric_key():\n return Fernet.generate_key()", "def generate(self):\n if self.curvetype == KeyType.ECDSA_P256v1:\n self.private_key_obj = ec.generate_private_key(ec.SECP256R1(), default_backend())\n elif self.curvetype == KeyType.ECDSA_SECP256k1:\n self.private_key_obj = ec.generate_private_key(ec.SECP256K1(), default_backend())\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_private_key_bytes()\n self._get_naive_public_key_bytes()", "def get_otp(self, key):\n packed = self.pack()\n obj = AES.new(key, AES.MODE_ECB)\n ciphertext = obj.encrypt(packed)\n return ciphertext", "def AddPrivateKeyFlag(parser, required=False):\n help_text = \"\"\"\\\n Unencrypted PKCS#1 or PKCS#8 PEM-encoded private key associated with\n the Client Certificate. 
Database Migration Service encrypts the value when\n storing it.\n \"\"\"\n parser.add_argument('--private-key', help=help_text, required=required)", "def test_ec_no(self):\n key = c.KEY_EC\n usage = [\n c.KU_KEYENCIPHERMENT,\n c.KU_DATAENCIPHERMENT,\n ]\n self.assertFalse(utils.check_key_usage(key, usage))", "def set_key(self, key, transpose=False):\n if transpose:\n raise NotImplementedError('transpose not implemented')\n\n self._menu_select('Edit->Key Signature')\n\n key_dialog = self._app.window(class_name='TKEY')\n key_dialog.wait('ready')\n key_dialog.TComboBox1.select(key)\n key_dialog.TRadioButton4.click() # No Transpose\n key_dialog.TButton3.click() # OK\n self.wait_ready()", "def test_from_alt_text(self):\n rkeyring = dns.tsigkeyring.from_text(alt_text_keyring)\n self.assertEqual(rkeyring, rich_keyring)", "def _get_encryption_key(self, **options):\n\n return self._public_key", "def deactive_key(iam_username):\n\n try:\n previous_secret_value = secretmanager.get_secret_value(\n SecretId=iam_username, VersionStage=\"AWSPREVIOUS\"\n )\n previous_secret_data = json.loads(previous_secret_value[\"SecretString\"])\n previous_access_key = previous_secret_data[\"AccessKey\"]\n\n \n print(\n f\"deactivating access key {previous_access_key} \"\n f\"for IAM user {iam_username}\"\n )\n\n iam.update_access_key(\n AccessKeyId=previous_access_key, Status=\"Inactive\", UserName=iam_username\n )\n\n emailmsg = f\"Hello,\\n\\n\" f\"The previous access key {previous_access_key}\"\n\n emailmsg = (\n f\"{emailmsg} has been disabled for {iam_username}.\\n\\n\"\n f\"This key will be deleted in the next 14 days. \"\n f\"If your application has lost access, be sure to update the \"\n f\"access key.\\n You can find the new key by looking up the secret \"\n f'\"{iam_username}\" under secrets manager via AWS Console '\n f\"in {AWS_REGION_NAME}.\\n\\n\"\n )\n\n sns.publish(\n TopicArn=SNS_TOPIC_ARN,\n Message=emailmsg,\n Subject=\"AWS Access Key Rotation: Previous key deactivated for \"\n f\"{iam_username}\",\n )\n print(\"Access Key has been deacivated\")\n return {\"status\": 200}\n except ClientError as e:\n print(e)\n return {\"status\": 500}", "def _get_decryption_key(self, **options):\n\n return self._private_key", "def test_rekey_non_encrypted(self):\n with pytest.raises(EncryptionError):\n rekey('message', old_key=b'0' * 32, new_key=b'1' * 32)", "def _configEncryptedPass():\n try:\n utils.configEncryptedPass(controller.CONF[\"ENCRYPTED_DB_PASS\"])\n except:\n logging.error(\"ERROR Editing engine local configuration file.\")\n logging.error(traceback.format_exc())\n raise Exception(output_messages.ERR_EXP_FAILED_CONFIG_ENGINE)", "def Elevate(self):\n self.Send(self.EncryptString('elevate\\n'))\n print self.DecryptString(self.Recv(4096))\n\n self.Send(self.EncryptString(self.flag_2))\n print self.DecryptString(self.Recv(4096))\n\n self.Send(self.EncryptString('RocketDonkey\\n'))\n print self.DecryptString(self.Recv(4096))", "def test_secretbox_enc_dec(test_data, minion_opts):\n # Store the data\n with patch(\"salt.runners.nacl.__opts__\", minion_opts, create=True):\n ret = nacl.keygen()\n assert \"pk\" in ret\n assert \"sk\" in ret\n pk = ret[\"pk\"]\n sk = ret[\"sk\"]\n\n # Encrypt with pk\n encrypted_data = nacl.secretbox_encrypt(\n data=test_data,\n sk=sk,\n )\n\n # Decrypt with sk\n ret = nacl.secretbox_decrypt(\n data=encrypted_data,\n sk=sk,\n )\n assert test_data == ret", "def setPassphrase( self , passphrase ):\n\t\tself.passphrase\t= passphrase\n\t\t\n\t\t# Generate and log the generated 
PMK.\n\t\tself.PMK = pbkdf2_bin( self.passphrase , self.ssid , 4096 , 32 )\n\t\tself.logger.logKey( 'Pairwise Master Key' , self.PMK )", "def set_tokenterminal_key(\n key: str, persist: bool = False, show_output: bool = False\n) -> str:\n handle_credential(\"API_TOKEN_TERMINAL_KEY\", key, persist)\n return check_tokenterminal_key(show_output)", "def configure_service_password_encryption(device):\n\n try:\n device.configure(\"service password-encryption\")\n except SubCommandFailure:\n raise SubCommandFailure(\n \"Could not configure service password encryption\"\n )", "def encryption_key(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"encryption_key\")", "def dh_get_key():\n G = EcGroup()\n priv_dec = G.order().random()\n pub_enc = priv_dec * G.generator()\n return (G, priv_dec, pub_enc)", "def setup_service_key():\n if get_var('AFS_AKIMPERSONATE'):\n keytab = get_var('KRB_AFS_KEYTAB')\n if keytab and not os.path.exists(keytab):\n cell = get_var('AFS_CELL')\n realm = get_var('KRB_REALM')\n enctype = get_var('KRB_AFS_ENCTYPE')\n _KeytabKeywords().create_service_keytab(keytab, cell, realm, enctype, akimpersonate=True)\n if get_var('AFS_KEY_FILE') == 'KeyFile':\n run_keyword(\"Create Key File\")\n elif get_var('AFS_KEY_FILE') == 'rxkad.keytab':\n run_keyword(\"Install rxkad-k5 Keytab\")\n elif get_var('AFS_KEY_FILE') == 'KeyFileExt':\n run_keyword(\"Create Extended Key File\", get_var('KRB_AFS_ENCTYPE'))\n else:\n raise AssertionError(\"Unsupported AFS_KEY_FILE! %s\" % (get_var('AFS_KEY_FILE')))", "def encrypt(self, key, value):\n\n iv = ''.join(chr(random.randint(0, 0xFF)) for i in range(16))\n key = hashlib.sha256(key).digest()[:self.BLOCK_SIZE]\n cipher = AES.new(key, AES.MODE_CBC, iv)\n crypted = cipher.encrypt(self.pkcs5_pad(value))\n return iv+crypted", "def test_rekey_defaults(self, settings):\n old_key = b'0' * 32\n new_key = b'1' * 32\n\n settings.CHITON_ENCRYPTION_KEY = new_key\n settings.CHITON_PREVIOUS_ENCRYPTION_KEY = old_key\n\n encrypted = encrypt('message', key=old_key)\n rekeyed = rekey(encrypted)\n\n assert decrypt(rekeyed) == 'message'", "def get_key(self, user):\r\n from delicious_cake.models import ApiKey\r\n\r\n try:\r\n key = ApiKey.objects.get(user=user)\r\n except ApiKey.DoesNotExist:\r\n return False\r\n\r\n return key.key" ]
[ "0.6332396", "0.5796015", "0.5760727", "0.5640943", "0.55775386", "0.5567757", "0.5502583", "0.5476372", "0.5373028", "0.51461244", "0.51198745", "0.50406486", "0.50342894", "0.49658433", "0.4959471", "0.49052796", "0.48931384", "0.47266513", "0.4723852", "0.4683759", "0.465386", "0.46328923", "0.4613472", "0.4613015", "0.45958245", "0.45823336", "0.4566973", "0.45629558", "0.45478776", "0.45339298", "0.45335257", "0.45210445", "0.45139736", "0.45072114", "0.4505753", "0.4497126", "0.44811794", "0.44719526", "0.4470338", "0.44684708", "0.44442004", "0.4432971", "0.44302458", "0.44274253", "0.44246644", "0.4406412", "0.43902707", "0.43893343", "0.438834", "0.4385244", "0.43835396", "0.4380524", "0.43796238", "0.4364405", "0.43638784", "0.4351339", "0.4329381", "0.43266487", "0.43228492", "0.4316095", "0.43121195", "0.4311172", "0.42976275", "0.42954782", "0.42935818", "0.42911547", "0.4286907", "0.42845678", "0.4284513", "0.42801914", "0.4274808", "0.42732754", "0.42684975", "0.42642227", "0.42506108", "0.4247065", "0.42445242", "0.42428955", "0.4242672", "0.42422855", "0.42366567", "0.42351958", "0.42328295", "0.4229068", "0.42288345", "0.42271173", "0.4222466", "0.4200895", "0.4200733", "0.41958866", "0.41887155", "0.41849205", "0.4184823", "0.41833827", "0.41832206", "0.41811115", "0.41727886", "0.41713166", "0.4169687", "0.41673273" ]
0.47746104
17
This operation is applicable to replica set instances and sharded cluster instances. You can call this operation to check whether resources are sufficient for creating an instance, upgrading a replica set or sharded cluster instance, or upgrading a single node of a sharded cluster instance. > You can call this operation a maximum of 200 times per minute.
def evaluate_resource_with_options( self, request: dds_20151201_models.EvaluateResourceRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.EvaluateResourceResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.dbinstance_class): query['DBInstanceClass'] = request.dbinstance_class if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.engine): query['Engine'] = request.engine if not UtilClient.is_unset(request.engine_version): query['EngineVersion'] = request.engine_version if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.readonly_replicas): query['ReadonlyReplicas'] = request.readonly_replicas if not UtilClient.is_unset(request.region_id): query['RegionId'] = request.region_id if not UtilClient.is_unset(request.replication_factor): query['ReplicationFactor'] = request.replication_factor if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token if not UtilClient.is_unset(request.shards_info): query['ShardsInfo'] = request.shards_info if not UtilClient.is_unset(request.storage): query['Storage'] = request.storage if not UtilClient.is_unset(request.zone_id): query['ZoneId'] = request.zone_id req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='EvaluateResource', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.EvaluateResourceResponse(), self.call_api(params, req, runtime) )
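For context, a minimal usage sketch of the method shown in the document field above. Only `EvaluateResourceRequest`, `RuntimeOptions`, and the `evaluate_resource_with_options` call itself come from that field; the import paths follow the standard generated-SDK layout, and the client object, region, engine values, and instance class are illustrative assumptions, not part of this row.

```python
# Hypothetical usage sketch -- only the request model and the
# evaluate_resource_with_options call mirror the document field above;
# the client construction and all parameter values are assumptions.
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_tea_util import models as util_models


def check_capacity(client) -> None:
    # `client` is assumed to be an already-constructed DDS API client that
    # exposes evaluate_resource_with_options, as in the document above.
    request = dds_20151201_models.EvaluateResourceRequest(
        region_id='cn-hangzhou',           # assumed example region
        engine='MongoDB',
        engine_version='4.4',
        dbinstance_class='dds.mongo.mid',  # assumed example instance class
        replication_factor='3',            # illustrative value
    )
    runtime = util_models.RuntimeOptions()
    response = client.evaluate_resource_with_options(request, runtime)
    print(response.body)
```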
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_smartstack_replication_for_instance(\n instance_config,\n expected_count,\n smartstack_replication_checker,\n):\n\n crit_threshold = instance_config.get_replication_crit_percentage()\n\n log.info('Checking instance %s in smartstack', instance_config.job_id)\n smartstack_replication_info = \\\n smartstack_replication_checker.get_replication_for_instance(instance_config)\n\n log.debug('Got smartstack replication info for %s: %s' %\n (instance_config.job_id, smartstack_replication_info))\n\n if len(smartstack_replication_info) == 0:\n status = pysensu_yelp.Status.CRITICAL\n output = (\n 'Service %s has no Smartstack replication info. Make sure the discover key in your smartstack.yaml '\n 'is valid!\\n'\n ) % instance_config.job_id\n log.error(output)\n else:\n expected_count_per_location = int(expected_count / len(smartstack_replication_info))\n output = ''\n output_critical = ''\n output_ok = ''\n under_replication_per_location = []\n\n for location, available_backends in sorted(smartstack_replication_info.items()):\n num_available_in_location = available_backends.get(instance_config.job_id, 0)\n under_replicated, ratio = is_under_replicated(\n num_available_in_location, expected_count_per_location, crit_threshold,\n )\n if under_replicated:\n output_critical += '- Service %s has %d out of %d expected instances in %s (CRITICAL: %d%%)\\n' % (\n instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,\n )\n else:\n output_ok += '- Service %s has %d out of %d expected instances in %s (OK: %d%%)\\n' % (\n instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,\n )\n under_replication_per_location.append(under_replicated)\n\n output += output_critical\n if output_critical and output_ok:\n output += '\\n\\n'\n output += 'The following locations are OK:\\n'\n output += output_ok\n\n if any(under_replication_per_location):\n status = pysensu_yelp.Status.CRITICAL\n output += (\n \"\\n\\n\"\n \"What this alert means:\\n\"\n \"\\n\"\n \" This replication alert means that a SmartStack powered loadbalancer (haproxy)\\n\"\n \" doesn't have enough healthy backends. Not having enough healthy backends\\n\"\n \" means that clients of that service will get 503s (http) or connection refused\\n\"\n \" (tcp) when trying to connect to it.\\n\"\n \"\\n\"\n \"Reasons this might be happening:\\n\"\n \"\\n\"\n \" The service may simply not have enough copies or it could simply be\\n\"\n \" unhealthy in that location. There also may not be enough resources\\n\"\n \" in the cluster to support the requested instance count.\\n\"\n \"\\n\"\n \"Things you can do:\\n\"\n \"\\n\"\n \" * You can view the logs for the job with:\\n\"\n \" paasta logs -s %(service)s -i %(instance)s -c %(cluster)s\\n\"\n \"\\n\"\n \" * Fix the cause of the unhealthy service. 
Try running:\\n\"\n \"\\n\"\n \" paasta status -s %(service)s -i %(instance)s -c %(cluster)s -vv\\n\"\n \"\\n\"\n \" * Widen SmartStack discovery settings\\n\"\n \" * Increase the instance count\\n\"\n \"\\n\"\n ) % {\n 'service': instance_config.service,\n 'instance': instance_config.instance,\n 'cluster': instance_config.cluster,\n }\n log.error(output)\n else:\n status = pysensu_yelp.Status.OK\n log.info(output)\n send_event(instance_config=instance_config, status=status, output=output)", "def check(key):\n instance = key.get()\n if not instance:\n logging.warning('Instance does not exist: %s', key)\n return\n\n if not instance.active_metadata_update:\n logging.warning('Instance active metadata operation unspecified: %s', key)\n return\n\n if not instance.active_metadata_update.url:\n logging.warning(\n 'Instance active metadata operation URL unspecified: %s', key)\n return\n\n now = utils.utcnow()\n result = net.json_request(\n instance.active_metadata_update.url, scopes=gce.AUTH_SCOPES)\n if result['status'] != 'DONE':\n return\n\n if result.get('error'):\n logging.warning(\n 'Instance metadata operation failed: %s\\n%s',\n key,\n json.dumps(result, indent=2),\n )\n metrics.instance_set_metadata_time.add(\n (now - instance.active_metadata_update.operation_ts).total_seconds(),\n fields={\n 'success': False,\n 'zone': instance.instance_group_manager.id(),\n },\n )\n metrics.send_machine_event('METADATA_UPDATE_FAILED', instance.hostname)\n reschedule_active_metadata_update(key, instance.active_metadata_update.url)\n metrics.send_machine_event('METADATA_UPDATE_READY', instance.hostname)\n else:\n metrics.instance_set_metadata_time.add(\n (now - instance.active_metadata_update.operation_ts).total_seconds(),\n fields={\n 'success': True,\n 'zone': instance.instance_group_manager.id(),\n },\n )\n metrics.send_machine_event('METADATA_UPDATE_SUCCEEDED', instance.hostname)\n clear_active_metadata_update(key, instance.active_metadata_update.url)", "def check_status():\n js = _get_jetstream_conn()\n i = js.compute.instances.get(session.attributes.get('instance_id'))\n if not i:\n return question(\"There was a problem. 
Please retry your command.\")\n\n status = i.state\n if session.attributes['status'] != status:\n msg = \"New instance status is {0}.\".format(status)\n if not session.attributes['public_ip'] and status == 'running':\n # Attach a floating IP to the instance\n fip = None\n fips = js.network.floating_ips()\n for ip in fips:\n if not ip.in_use():\n fip = ip\n if fip:\n i.add_floating_ip(fip.public_ip)\n session.attributes['public_ip'] = fip.public_ip\n else:\n msg = \"Instance status is {0}\".format(status)\n\n session.attributes['status'] = status\n\n if session.attributes['status'] != 'running':\n q = \"Would you like to check the status again?\"\n return question(msg + q).reprompt(q)\n else:\n card_content = 'Access your instance at http://{0}'.format(\n session.attributes.get('public_ip'))\n return statement(msg).simple_card(\n title=\"Instance {0} was launched.\".format(i.name),\n content=msg + card_content)", "def instanceha_deployed():\n if overcloud.has_overcloud():\n return get_overcloud_nodes_running_pcs_resource(\n resource='nova-evacuate')\n else:\n return False", "def test_instance_not_overscaled(self) -> None:\n if self.prod_env:\n ec2_name = 'saints-xctf-server-prod-asg'\n else:\n ec2_name = 'saints-xctf-server-dev-asg'\n\n instances = self.get_ec2(ec2_name)\n self.assertTrue(len(instances) < 3)", "def instances_availability(self, lastsubmitedinstance, metrics):\n connection = self.connection\n instancesconfig = self.instancesconfigs\n\n cur = connection.cursor()\n harvesters = instancesconfig.keys()\n connection.row_factory = sqlite3.Row\n\n for harvesterid in harvesters:\n error_text = set()\n\n instanceisenable = self.__str_to_bool(instancesconfig[harvesterid]['instanceisenable'])\n del instancesconfig[harvesterid]['instanceisenable']\n ### Instance is enable ###\n if instanceisenable:\n for host in instancesconfig[harvesterid].keys():\n avaibility = []\n if self.__str_to_bool(instancesconfig[harvesterid][host]['hostisenable']):\n ### No submitted worker ###\n timedelta_submitted = timedelta(minutes=30)\n if host != 'none' and host in instancesconfig[harvesterid] \\\n and self.__str_to_bool( instancesconfig[harvesterid][host]['metrics']['lastsubmittedworker']['enable']):\n timedelta_submitted = self.__get_timedelta(\n instancesconfig[harvesterid][host]['metrics']['lastsubmittedworker']['value'])\n if lastsubmitedinstance[harvesterid]['harvesterhost'][host][\n 'harvesterhostmaxtime'] < datetime.utcnow() - timedelta_submitted:\n error = \"Last submitted worker was {0}\".format(\n str(lastsubmitedinstance[harvesterid]['harvesterhost'][host][\n 'harvesterhostmaxtime'])) + '\\n'\n error_text.add(error)\n avaibility.append(0)\n if harvesterid in metrics:\n ### No heartbeat ###\n heartbeattime = metrics[harvesterid][host].keys()[0]\n contacts = instancesconfig[harvesterid][host]['contacts']\n timedelta_heartbeat = self.__get_timedelta(instancesconfig[harvesterid][host]['metrics']['lastheartbeat']['value'])\n if self.__str_to_bool( instancesconfig[harvesterid][host]['metrics']['lastheartbeat']['enable']) and \\\n heartbeattime < datetime.utcnow() - timedelta_heartbeat:\n error = \"Last heartbeat was {0}\".format(\n str(heartbeattime)) + '\\n'\n error_text.add(error)\n avaibility.append(0)\n\n #### Metrics ####\n memory = instancesconfig[harvesterid][host]['memory']\n cpu_warning = instancesconfig[harvesterid][host]['metrics']['cpu']['cpu_warning']\n cpu_critical = instancesconfig[harvesterid][host]['metrics']['cpu']['cpu_critical']\n disk_warning = 
instancesconfig[harvesterid][host]['metrics']['disk']['disk_warning']\n disk_critical = instancesconfig[harvesterid][host]['metrics']['disk']['disk_critical']\n memory_warning = instancesconfig[harvesterid][host]['metrics']['memory']['memory_warning']\n memory_critical = instancesconfig[harvesterid][host]['metrics']['memory']['memory_critical']\n\n cpu_enable = self.__str_to_bool(\n instancesconfig[harvesterid][host]['metrics']['cpu']['enable'])\n disk_enable = self.__str_to_bool(\n instancesconfig[harvesterid][host]['metrics']['disk']['enable'])\n memory_enable = self.__str_to_bool(\n instancesconfig[harvesterid][host]['metrics']['memory']['enable'])\n\n #### Metrics DB ####\n for metric in metrics[harvesterid][host][heartbeattime]:\n #### CPU ####\n if cpu_enable:\n cpu_pc = int(metric['cpu_pc'])\n if cpu_pc >= cpu_warning:\n avaibility.append(50)\n error = \"Warning! CPU utilization: {0}\".format(\n str(cpu_pc)) + '\\n'\n error_text.add(error)\n elif cpu_pc >= cpu_critical:\n avaibility.append(10)\n error = \"CPU utilization: {0}\".format(\n str(cpu_pc)) + '\\n'\n error_text.add(error)\n #### Memory ####\n if memory_enable:\n if 'memory_pc' in metric:\n memory_pc = int(metric['memory_pc'])\n else:\n memory_pc = int(self.__get_change(metric['rss_mib'], memory))\n if memory_pc >= memory_warning:\n avaibility.append(50)\n error = \"Warning! Memory consumption: {0}\".format(\n str(memory_pc)) + '\\n'\n error_text.add(error)\n elif memory_pc >= memory_critical:\n avaibility.append(0)\n error = \"Memory consumption: {0}\".format(\n str(memory_pc)) + '\\n'\n error_text.add(error)\n #### HDD&HDD1 ####\n if disk_enable:\n if 'volume_data_pc' in metric:\n volume_data_pc = int(metric['volume_data_pc'])\n else:\n volume_data_pc = -1\n if volume_data_pc >= disk_warning:\n avaibility.append(50)\n error = \"Warning! Disk utilization: {0}\".format(\n str(volume_data_pc)) + '\\n'\n error_text.add(error)\n elif volume_data_pc >= disk_critical:\n avaibility.append(10)\n error = \"Disk utilization: {0}\".format(\n str(volume_data_pc)) + '\\n'\n error_text.add(error)\n if 'volume_data1_pc' in metric:\n volume_data1_pc = int(metric['volume_data1_pc'])\n if volume_data1_pc >= disk_warning:\n avaibility.append(50)\n error = \"Warning! 
Disk 1 utilization: {0}\".format(\n str(volume_data1_pc)) + '\\n'\n error_text.add(error)\n elif volume_data1_pc >= disk_critical:\n avaibility.append(10)\n error = \"Disk 1 utilization: {0}\".format(\n str(volume_data1_pc)) + '\\n'\n error_text.add(error)\n try:\n cur.execute(\"insert into INSTANCES values (?,?,?,?,?,?,?,?,?)\",\n (str(harvesterid), str(host),\n str(lastsubmitedinstance[harvesterid]['harvesterhost'][host]['harvesterhostmaxtime']),\n heartbeattime, 1, 0, min(avaibility) if len(avaibility) > 0 else 100, str(contacts), ', '.join(str(e) for e in error_text)))\n connection.commit()\n error_text = set()\n except:\n query = \\\n \"\"\"UPDATE INSTANCES \n SET lastsubmitted = '{0}', active = {1}, availability = {2}, lastheartbeat = '{3}', contacts = '{4}', errorsdesc = '{5}'\n WHERE harvesterid = '{6}' and harvesterhost = '{7}'\n \"\"\".format(str(lastsubmitedinstance[harvesterid]['harvesterhost'][host]['harvesterhostmaxtime']),\n 1, min(avaibility) if len(avaibility) > 0 else 100, heartbeattime, str(contacts), ', '.join(str(e) for e in error_text), str(harvesterid),\n str(host))\n cur.execute(query)\n connection.commit()\n error_text = set()\n else:\n cur.execute(\"DELETE FROM INSTANCES WHERE harvesterid = ?\", [str(harvesterid)])\n connection.commit()", "def run():\r\n if args.action == \"create\":\r\n if args.customer_id and args.node_type:\r\n my_aws = createaws()\r\n node_id = my_aws.create_ec2_instance(args.customer_id, args.node_type)\r\n print(\"Node Created: \", node_id, \"\\n\")\r\n return True\r\n else:\r\n print(\"Missed command parameters for Instance Creation\")\r\n return False\r\n\r\n elif args.action == \"list-nodes\":\r\n if args.customer_id:\r\n my_aws = createaws()\r\n instance_lst = my_aws.get_instance_by_customer_id(args.customer_id)\r\n print(\"Customer\", args.customer_id, \"has \" + str(len(instance_lst)) + \" Instances: \", \",\".join(instance_lst)\r\n ,\"\\n\")\r\n return True\r\n else:\r\n print(\"Missed command parameters for Instance Listing\")\r\n return False\r\n\r\n elif args.action == \"list-all\":\r\n my_aws = createaws()\r\n cust_inst_ip = my_aws.get_all_instances()\r\n print(\"All the Instances: customer_id, instance_id, instance_ip formatted\\n\")\r\n if len(cust_inst_ip) > 0:\r\n for rec in cust_inst_ip:\r\n print(', '.join(rec))\r\n else:\r\n print(\"No Instances!\")\r\n return False\r\n return True\r\n\r\n elif args.action == \"execute\":\r\n instance_ids, succ_id_list, not_worked_is_list, outs = [], [], [], []\r\n if args.script and (args.customer_id or args.node_type):\r\n my_aws = createaws()\r\n commands = args.script\r\n if args.customer_id:\r\n instance_ids.extend(my_aws.get_instance_by_customer_id(args.customer_id))\r\n if args.node_type:\r\n instance_ids.extend(my_aws.get_instance_by_node_type(args.node_type))\r\n instance_ids = list(set(instance_ids))\r\n\r\n succ_id_list, not_worked_is_list, outs = \\\r\n my_aws.execute_commands_on_linux_instances(commands, instance_ids)\r\n print(\"\\nInstances that run the commands:\\n\", '\\n '.join(succ_id_list))\r\n print(\"\\nInstances that don't run the commands: (Instance is not running or its SSM agent doesn't work or \"\r\n \"command couldn't be executed\\n\", '\\n '.join(not_worked_is_list))\r\n print(\"\\nOutputs of the Instances that run the commands:\")\r\n for i in outs:\r\n print(\"\\n\")\r\n for k, v in dict(i).items():\r\n print(str(k).lstrip(), \"-->\", str(v).replace('\\n', \"\"))\r\n return True\r\n else:\r\n print(\"Missed command parameters for Execution on Instance\")\r\n 
return False\r\n\r\n elif args.action == \"backup\":\r\n if args.node_id:\r\n my_aws = createaws()\r\n s_id = my_aws.make_backup(args.node_id)\r\n print(s_id)\r\n else:\r\n return False\r\n\r\n elif args.action == \"list-backups\":\r\n if args.node_id:\r\n my_aws = createaws()\r\n backup_list = my_aws.list_backups(args.node_id)\r\n if len(backup_list) > 0:\r\n for rec in backup_list:\r\n print(', '.join(rec))\r\n return True\r\n else:\r\n print(\"Snapshot yok !\")\r\n return True\r\n else:\r\n return False\r\n\r\n elif args.action == \"roll-back\":\r\n if args.backup_id:\r\n my_aws = createaws()\r\n my_aws.roll_back(args.backup_id, args.node_id)\r\n elif args.action == \"terminate-all\":\r\n my_aws = createaws()\r\n my_aws.terminate_instances('ALL')\r\n else:\r\n print(\"Please select a proper action\")", "def allowed_instances(context, requested_instances, instance_type):\n project_id = context.project_id\n context = context.elevated()\n requested_cores = requested_instances * instance_type['vcpus']\n requested_ram = requested_instances * instance_type['memory_mb']\n usage = db.instance_data_get_for_project(context, project_id)\n used_instances, used_cores, used_ram = usage\n quota = get_project_quotas(context, project_id)\n allowed_instances = _get_request_allotment(requested_instances,\n used_instances,\n quota['instances'])\n allowed_cores = _get_request_allotment(requested_cores, used_cores,\n quota['cores'])\n allowed_ram = _get_request_allotment(requested_ram, used_ram, quota['ram'])\n allowed_instances = min(allowed_instances,\n allowed_cores // instance_type['vcpus'],\n allowed_ram // instance_type['memory_mb'])\n return min(requested_instances, allowed_instances)", "def test_create_cluster_resource_quota(self):\n pass", "def check_available(rs):\n info = rs.get_cluster_info()\n if info['ClusterStatus'] == 'available':\n return True\n elif info['ClusterStatus'] == 'deleting':\n raise AttributeError(f'Cluster is currently deleting. Please wait and try again.\\n{info}')\n elif info['ClusterStatus'] == 'creating': \n return False\n \n raise NameError(f'Could not get cluster status information. Current information about the cluster: \\n{info}')", "def test_patch_cluster_resource_quota_status(self):\n pass", "def check_autoscaling_group_health(asg_name, current_capacity_count):\n if_verbose(\"Checking the health of ASG %s\" % asg_name)\n timer = time.time()\n while(True):\n if_verbose(\"Sleeping for %d seconds whilst waiting for ASG health\" % args.update_timeout)\n time.sleep(args.update_timeout)\n\n if int(time.time() - timer) >= args.health_check_timeout:\n return \"Health check timer expired on asg_instances count. 
A manual clean up is likely.\"\n\n completed_instances = 0\n asg_instances = asg.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name], MaxRecords=1)[\"AutoScalingGroups\"][0][\"Instances\"]\n\n while(len(asg_instances) != current_capacity_count):\n if_verbose(\"Waiting for all of %s's instances (%d) to appear healthy\" % (asg_name, args.instance_count_step))\n time.sleep(args.update_timeout)\n asg_instances = asg.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name], MaxRecords=1)[\"AutoScalingGroups\"][0][\"Instances\"]\n\n for instance in asg_instances:\n if_verbose(\"Progress of ASG instance %s: %s\" % (instance[\"InstanceId\"], instance[\"LifecycleState\"]))\n\n if instance[\"LifecycleState\"] == \"InService\":\n completed_instances += 1\n\n if completed_instances >= len(asg_instances):\n if_verbose(\"We have %d healthy nodes and we wanted %d - moving on.\" % (completed_instances, len(asg_instances)))\n break\n else:\n completed_instances = 0\n\n if_verbose(\"ASG %s is healthy\" % asg_name)\n return None", "def test_replace_cluster_resource_quota_status(self):\n pass", "def test_update_instance_limit(self):\n pass", "def test_update_instance_limit1(self):\n pass", "def check_number_of_instances(self):\r\n\r\n if RecomendationDBManagement.management_instances_created != 0:\r\n raise ValueError(\"There can only be one database manager\")\r\n else:\r\n RecomendationDBManagement.management_instances_created = RecomendationDBManagement.management_instances_created + 1", "def test_read_cluster_resource_quota_status(self):\n pass", "def test_patch_cluster_resource_quota(self):\n pass", "def test_non_additive_instance_creation(self):\n\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import launch_instances, run_block_device_dict, farm_security_group_setup\n\n # launch_instances requires vpc setup as done by firesim/scripts/setup_firesim.py\n from awstools.aws_setup import aws_setup\n aws_setup()\n farm_security_group_setup()\n\n type = 'f1.2xlarge'\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags = {'fsimcluster': 'testcluster'},\n always_expand=False)\n instances.should.have.length_of(1)\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags = {'fsimcluster': 'testcluster'},\n always_expand=False)\n instances.should.have.length_of(1)\n\n # There should be one instance total now, across one reservation\n ec2_client = boto3.client('ec2')\n paginator = ec2_client.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n all_reservations.should.have.length_of(1)\n\n [i for r in all_reservations for i in r['Instances']].should.have.length_of(1)", "def dvs_instances_batch_mix_sg(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n self.show_step(2)\n net_1 = os_conn.create_network(\n 
network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network is created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(3)\n self.show_step(4)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n self.icmp[\"security_group_rule\"][\"security_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"remote_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n self.icmp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n\n for sg in [sg1, sg2]:\n self.tcp[\"security_group_rule\"][\"security_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"remote_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n self.tcp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n\n # Add rules for ssh and ping\n os_conn.goodbye_security()\n _s_groups = os_conn.neutron.list_security_groups()['security_groups']\n _srv_tenant_id = os_conn.get_tenant(SERVTEST_TENANT).id\n default_sg = [sg for sg in _s_groups\n if sg['tenant_id'] == _srv_tenant_id and\n sg['name'] == 'default'][0]\n\n for step in (5, 9):\n self.show_step(step) # step 5, 9\n step += 1\n self.show_step(step) # step 6, 10\n step += 1\n\n openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=vm_count,\n security_groups=[sg1.name, sg2.name, default_sg['name']])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(step) # step 7, 11\n step += 1\n\n instances = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, instances)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair,\n timeout=60 * 5,\n interval=10)\n\n self.show_step(step) # step 8, 12\n step += 1\n\n for instance in instances:\n os_conn.nova.servers.delete(instance)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(instance)", "def test_replace_cluster_resource_quota(self):\n pass", "def _Exists(self, instance_only: bool = False) -> bool:\n cmd = util.GcloudCommand(self, 'spanner', 'instances', 'describe',\n self.name)\n\n # Do not log error or warning when checking existence.\n _, _, retcode = cmd.Issue(suppress_warning=True, raise_on_failure=False)\n if retcode != 0:\n logging.info('Could not find GCP Spanner instance %s.', self.name)\n return False\n\n if instance_only:\n return True\n\n cmd = util.GcloudCommand(self, 'spanner', 'databases', 'describe',\n self.database)\n cmd.flags['instance'] = self.name\n\n # Do not log 
error or warning when checking existence.\n _, _, retcode = cmd.Issue(suppress_warning=True, raise_on_failure=False)\n if retcode != 0:\n logging.info('Could not find GCP Spanner database %s.', self.database)\n return False\n\n return True", "def test_redis_increase_replica_count_usual_case():", "def scale_up(nvms, valid_hostnames=None, vms_allegedly_running=0):\n\n global api, img, flavor, user_data, network, owned_instances\n\n # Try to get image if necessary\n if img is None:\n img = image(cf['api']['image_id'])\n if img is None:\n logging.error(\"Cannot scale up: image id %s not found\" % image(cf['api']['image_id']))\n return []\n\n n_succ = 0\n n_fail = 0\n logging.info(\"We need %d more VMs...\" % nvms)\n\n inst = running_instances(valid_hostnames)\n if inst is None:\n logging.error(\"No list of instances can be retrieved from API\")\n return []\n\n n_running_vms = len(inst) + vms_allegedly_running # number of *total* VMs running (also the ones *not* owned by HTCondor)\n if cf['quota']['max_vms'] >= 1:\n # We have a \"soft\" quota: respect it\n n_vms_to_start = int(min(nvms, cf['quota']['max_vms']-n_running_vms))\n if n_vms_to_start <= 0:\n logging.warning(\"Over quota (%d VMs already running out of %d): cannot launch any more VMs\" % \\\n (n_running_vms,cf['quota']['max_vms']))\n else:\n logging.warning(\"Quota enabled: requesting %d (out of desired %d) VMs\" % (n_vms_to_start,nvms))\n else:\n n_vms_to_start = int(nvms)\n\n # Launch VMs\n inst_ok = []\n for i in range(1, n_vms_to_start+1):\n\n success = False\n if int(cf['debug']['dry_run_boot_vms']) == 0:\n try:\n # Returns the reservation\n new_inst_id = img.run(\n token_id=api.keystone.token_id,\n key_name=cf['api']['key_name'],\n user_data=user_data,\n instance_type=flavor.id,\n network=network.id\n )\n\n # Get the single instance ID from the reservation\n owned_instances.append( new_inst_id )\n inst_ok.append( new_inst_id )\n\n success = True\n except Exception:\n logging.error(\"Cannot run instance via API: check your \\\"hard\\\" quota\")\n\n else:\n logging.info(\"Not running VM: dry run active\")\n success = True\n\n if success:\n n_succ+=1\n logging.info(\"VM launched OK. Requested: %d/%d | Success: %d | Failed: %d | ID: %s\" % \\\n (i, n_vms_to_start, n_succ, n_fail, new_inst_id))\n else:\n n_fail+=1\n logging.info(\"VM launch fail. 
Requested: %d/%d | Success: %d | Failed: %d\" % \\\n (i, n_vms_to_start, n_succ, n_fail))\n\n # Dump owned instances to file (if something changed)\n if n_succ > 0:\n save_owned_instances()\n\n return inst_ok", "def ec2_status(resource, metadata, return_count=False):\n\n instances = resource.instances.filter(\n Filters=[{'Name': 'tag:Name', 'Values': [metadata['fqdn']]},\n {'Name': 'instance-state-name', 'Values': ['pending', 'running']}, ])\n\n # get a count of the instances and then either return count or print results\n count = sum(1 for _ in instances)\n if return_count:\n # return count for conditional consumption in other functions\n return count\n else:\n # print for human consumption\n if count == 0:\n print(\"No instances running\")\n else:\n print(count, \"instances running\")\n print('{:20} {:15} {:22} {:18} {}'.format(\n 'instance_id', 'state', 'instance_name', 'public_ip_address', 'instance_role'))\n for instance in instances:\n # tags order does not deterministically stay from run to run and stored as list of dicts\n # tags = {instance.tags[0]['Key']: instance.tags[0]['Value'],\n # instance.tags[1]['Key']: instance.tags[1]['Value']}\n # probably there is a much better way to map this but let's make it a dict of tags\n tags = {}\n for tag in instance.tags:\n tags[tag['Key']] = tag['Value']\n\n print('{:20} {:15} {:22} {:18} {}'.format(\n instance.id, instance.state['Name'], tags['Name'],\n instance.public_ip_address, tags['Role']))", "def test_read_cluster_resource_quota(self):\n pass", "def pre_upgrade_checks(self):\n\n #HostOverview\n Logger.info(\"******************************************************************************************************************************************************\")\n Logger.info(\"\\t\\t\\t\\t\\t\\t\\tHOST OVERVIEW\")\n Logger.info(\"******************************************************************************************************************************************************\")\n print (\"\\n\")\n Logger.info(\"Ambari version\\t\\t:{0}\".format(self.ambari_version))\n\n #Check OS\n os = platform.dist()\n if os[1] != None:\n Logger.info(\"Operating System\\t\\t:{0} {1} - {2}\".format(os[0],os[1],os[2]))\n else:\n Logger.error(\"Unable to fetch OS details.\")\n self.terminate()\n return\n\n self.check_java_version()\n self.check_exactly_one_current_version()\n\n\n #Check if rack awareness is enabled ?\n rack_awareness = \"SELECT DISTINCT rack_info FROM hosts WHERE rack_info!='/default-rack';\"\n self.cursor.execute(rack_awareness)\n result = self.cursor.fetchone()\n if result is None or len(result) != 1:\n Logger.info(\"Rack Awareness ?\\t\\tNo\\n\")\n else:\n Logger.info(\"Rack Awareness ?\\t\\tYes\\n\")\n\n #Security Overview\n self.check_security()\n\n #Check High Availability configuration\n self.check_high_availability()\n\n #Check Metastores\n self.check_metastore()", "def _check_upgrade(dbapi, host_obj=None):\n is_upgrading, upgrade = is_upgrade_in_progress(dbapi)\n if is_upgrading:\n if host_obj:\n if host_obj.created_at > upgrade.created_at:\n LOG.info(\"New host %s created after upgrade, allow partition\" %\n host_obj.hostname)\n return\n\n raise wsme.exc.ClientSideError(\n _(\"ERROR: Disk partition operations are not allowed during a \"\n \"software upgrade. 
Try again after the upgrade is completed.\"))", "def test_eks_worker_node_managed_by_eks(self) -> None:\n response = self.ec2.describe_instances(Filters=[\n {\n 'Name': 'tag:Name',\n 'Values': ['eks-prod']\n }\n ])\n worker_instances = response.get('Reservations')[0].get('Instances')\n self.assertEqual(1, len(worker_instances))", "def check_owned_instance(st, instance_id):\n\n logging.info(\"Checking owned instance %s...\" % instance_id)\n\n global api, owned_instances\n\n # Get information from API: we need the IP address\n inst = api.nova.get_instance(token_id=api.keystone.token_id, instance_id=instance_id)\n\n # Check if the instance is in the list (using cached status)\n found = False\n for h in st['workers_status'].keys():\n if gethostbyname(h) == inst.network_ip(network_name=cf[\"api\"][\"network_name\"]):\n found = True\n break\n\n # Deal with errors\n if not found:\n logging.error(\"Instance %s (with IP %s) has not joined the cluster after %ds: terminating it\" % (instance_id, inst.private_ip_address, cf['elastiq']['estimated_vm_deploy_time_s']))\n\n try:\n inst.terminate(token_id=api.keystone.token_id)\n owned_instances.remove(instance_id)\n save_owned_instances()\n logging.info(\"Forcing shutdown of %s: OK\" % instance_id)\n except Exception as e:\n # Recheck in a while (10s) in case termination fails\n logging.error(\"Forcing shutdown of %s failed: rescheduling check\" % instance_id)\n return {\n 'action': 'check_owned_instance',\n 'when': time.time() + 10,\n 'params': [ instance_id ]\n }\n\n else:\n logging.debug(\"Instance %s (with IP %s) successfully joined the cluster within %ds\" % (instance_id, inst.network_ip(network_name=cf[\"api\"][\"network_name\"]), cf['elastiq']['estimated_vm_deploy_time_s']))\n\n return", "def check_load(cursor):\n cursor.execute(\"\"\"\n select pid from pg_stat_activity where query ~* 'FETCH'\n and datname = 'asos'\"\"\")\n if cursor.rowcount > 9:\n sys.stderr.write((\"/cgi-bin/request/metars.py over capacity: %s\"\n ) % (cursor.rowcount,))\n ssw(\"Content-type: text/plain\\n\")\n ssw('Status: 503 Service Unavailable\\n\\n')\n ssw(\"ERROR: server over capacity, please try later\")\n sys.exit(0)", "def test_healthcheck_galera_cluster(host):\n\n sql_query = (\"show status where Variable_name like 'wsrep_clu%'\"\n \"or Variable_name like 'wsrep_local_state%';\")\n mysql_cmd = 'mysql -h localhost -e \"{0}\"'.format(sql_query)\n\n cmd = \"{} {}\".format(galera_container, mysql_cmd)\n\n output = host.run(cmd)\n verify_items = ['wsrep_cluster_conf_id',\n 'wsrep_cluster_size',\n 'wsrep_cluster_state_uuid',\n 'wsrep_cluster_status',\n 'wsrep_local_state_uuid']\n\n for item in verify_items:\n assert item in output.stdout", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n backend_type: Optional[pulumi.Input['InstanceBackendType']] = None,\n connection_name: Optional[pulumi.Input[str]] = None,\n current_disk_size: Optional[pulumi.Input[str]] = None,\n database_version: Optional[pulumi.Input['InstanceDatabaseVersion']] = None,\n disk_encryption_configuration: Optional[pulumi.Input[pulumi.InputType['DiskEncryptionConfigurationArgs']]] = None,\n disk_encryption_status: Optional[pulumi.Input[pulumi.InputType['DiskEncryptionStatusArgs']]] = None,\n etag: Optional[pulumi.Input[str]] = None,\n failover_replica: Optional[pulumi.Input[pulumi.InputType['InstanceFailoverReplicaArgs']]] = None,\n gce_zone: Optional[pulumi.Input[str]] = None,\n instance_type: Optional[pulumi.Input['InstanceInstanceType']] = None,\n 
ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpMappingArgs']]]]] = None,\n ipv6_address: Optional[pulumi.Input[str]] = None,\n kind: Optional[pulumi.Input[str]] = None,\n maintenance_version: Optional[pulumi.Input[str]] = None,\n master_instance_name: Optional[pulumi.Input[str]] = None,\n max_disk_size: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n on_premises_configuration: Optional[pulumi.Input[pulumi.InputType['OnPremisesConfigurationArgs']]] = None,\n out_of_disk_report: Optional[pulumi.Input[pulumi.InputType['SqlOutOfDiskReportArgs']]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n replica_configuration: Optional[pulumi.Input[pulumi.InputType['ReplicaConfigurationArgs']]] = None,\n replica_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n root_password: Optional[pulumi.Input[str]] = None,\n satisfies_pzs: Optional[pulumi.Input[bool]] = None,\n scheduled_maintenance: Optional[pulumi.Input[pulumi.InputType['SqlScheduledMaintenanceArgs']]] = None,\n secondary_gce_zone: Optional[pulumi.Input[str]] = None,\n self_link: Optional[pulumi.Input[str]] = None,\n server_ca_cert: Optional[pulumi.Input[pulumi.InputType['SslCertArgs']]] = None,\n service_account_email_address: Optional[pulumi.Input[str]] = None,\n settings: Optional[pulumi.Input[pulumi.InputType['SettingsArgs']]] = None,\n state: Optional[pulumi.Input['InstanceState']] = None,\n suspension_reason: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceSuspensionReasonItem']]]] = None,\n __props__=None):\n ...", "def quick_instance(self, name, image, instance_type, env_tag='dev', zone_tag='starwatts', os_tag='debian', sg_id=None,\n private=True, extra_sg_ids=None, extra_tags=None, terminate_on_shutdown=False,\n debug=False):\n # Debug setting\n if debug:\n logging.basicConfig(level=logging.DEBUG)\n\n # Preliminary tests\n try:\n ami = self.get_image(image_id=image)\n except EC2ResponseError:\n logging.error(\"The image {} could not be found. Aborting.\".format(image))\n return\n print(\"Using AMI {} : {}\".format(image, ami.name))\n if len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'running'})) > 0 or \\\n len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'stopped'})) > 0:\n logging.error(\"An instance with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No instance has the same 'name' tag.\")\n if self.keypair_exists(name):\n logging.error(\"A keypair with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No keypair was found with the same name.\")\n if sg_id is None:\n if self.security_group_exists(name=name):\n logging.error(\"A security group with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No security group was found with the same name.\")\n\n # Tags generation\n logging.debug(\"Generating tags to apply.\")\n tags = dict(name=name, os=os_tag, env=env_tag, zone=zone_tag, privacy='true' if private else 'false')\n if extra_tags is not None:\n tags.update(extra_tags)\n print(\"Tags : {}\".format(tags))\n\n # Fetching needed security groups (bastion and zabbix)\n standard_sg = self.get_all_security_groups(groupnames=['standard'])\n if len(standard_sg) != 1:\n logging.error(\"Multiple or no security group was found for the 'bastion' search. 
Aborting.\")\n return\n standard_sg = standard_sg[0]\n logging.debug(\"The following security group was found for 'standard : {} {}\".format(standard_sg.id,\n standard_sg.description))\n\n # Security group creation\n if sg_id is None:\n sg = self.create_security_group(name, \"SG applied to {} VM\".format(name))\n sg_id = sg.id\n\n sg_ids = [sg_id, standard_sg.id, ]\n # Using the extra security groups if any\n if extra_sg_ids is not None:\n logging.debug(\"Extra security groups to add : {}\".format(extra_sg_ids))\n sg_ids.extend(extra_sg_ids)\n logging.debug(\"Security Groups : {}\".format(sg_ids))\n\n user_data = \"-----BEGIN OUTSCALE SECTION-----\\nprivate_only=true\\n-----END OUTSCALE SECTION-----\" if private else \"\"\n logging.debug(\"Creating keypair.\")\n kp = self.create_key_pair(key_name=name)\n fp = os.path.join(os.path.expanduser('~/.ssh'), '%s.pem' % kp.name)\n with open(fp, 'wb') as fd:\n fd.write(bytes(kp.material, \"UTF-8\"))\n logging.debug(\"Keypair written to ~/.ssh/{}.pem\".format(name))\n\n resa = self.run_instances(image_id=image, key_name=name, security_groups=sg_ids, instance_type=instance_type,\n user_data=user_data,\n instance_initiated_shutdown_behavior='terminate' if terminate_on_shutdown else 'stop')\n inst = resa.instances[0]\n logging.debug(\"Adding tags to the newly created machine.\")\n inst.add_tags(tags)\n return inst", "def check(self, instance):\n\n def total_seconds(td):\n \"\"\"\n Returns total seconds of a timedelta in a way that's safe for\n Python < 2.7\n \"\"\"\n if hasattr(td, 'total_seconds'):\n return td.total_seconds()\n else:\n return (\n lag.microseconds +\n (lag.seconds + lag.days * 24 * 3600) * 10**6\n ) / 10.0**6\n\n if 'server' not in instance:\n raise Exception(\"Missing 'server' in mongo config\")\n\n # x.509 authentication\n ssl_params = {\n 'ssl': instance.get('ssl', None),\n 'ssl_keyfile': instance.get('ssl_keyfile', None),\n 'ssl_certfile': instance.get('ssl_certfile', None),\n 'ssl_cert_reqs': instance.get('ssl_cert_reqs', None),\n 'ssl_ca_certs': instance.get('ssl_ca_certs', None)\n }\n\n for key, param in ssl_params.items():\n if param is None:\n del ssl_params[key]\n\n server = instance['server']\n username, password, db_name, nodelist, clean_server_name, auth_source = self._parse_uri(server, sanitize_username=bool(ssl_params))\n additional_metrics = instance.get('additional_metrics', [])\n\n # Get the list of metrics to collect\n collect_tcmalloc_metrics = 'tcmalloc' in additional_metrics\n metrics_to_collect = self._get_metrics_to_collect(\n server,\n additional_metrics\n )\n\n # Tagging\n tags = instance.get('tags', [])\n # ...de-dupe tags to avoid a memory leak\n tags = list(set(tags))\n\n if not db_name:\n self.log.info('No MongoDB database found in URI. 
Defaulting to admin.')\n db_name = 'admin'\n\n service_check_tags = [\n \"db:%s\" % db_name\n ]\n service_check_tags.extend(tags)\n\n # ...add the `server` tag to the metrics' tags only\n # (it's added in the backend for service checks)\n tags.append('server:%s' % clean_server_name)\n\n if nodelist:\n host = nodelist[0][0]\n port = nodelist[0][1]\n service_check_tags = service_check_tags + [\n \"host:%s\" % host,\n \"port:%s\" % port\n ]\n\n timeout = float(instance.get('timeout', DEFAULT_TIMEOUT)) * 1000\n try:\n cli = pymongo.mongo_client.MongoClient(\n server,\n socketTimeoutMS=timeout,\n connectTimeoutMS=timeout,\n serverSelectionTimeoutMS=timeout,\n read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED,\n **ssl_params)\n # some commands can only go against the admin DB\n admindb = cli['admin']\n db = cli[db_name]\n except Exception:\n self.service_check(\n self.SERVICE_CHECK_NAME,\n AgentCheck.CRITICAL,\n tags=service_check_tags)\n raise\n\n # Authenticate\n do_auth = True\n use_x509 = ssl_params and not password\n\n if not username:\n self.log.debug(\n u\"A username is required to authenticate to `%s`\", server\n )\n do_auth = False\n\n if do_auth:\n if auth_source:\n self.log.info(\"authSource was specified in the the server URL: using '%s' as the authentication database\", auth_source)\n self._authenticate(cli[auth_source], username, password, use_x509, clean_server_name, service_check_tags)\n else:\n self._authenticate(db, username, password, use_x509, clean_server_name, service_check_tags)\n\n try:\n status = db.command('serverStatus', tcmalloc=collect_tcmalloc_metrics)\n except Exception:\n self.service_check(\n self.SERVICE_CHECK_NAME,\n AgentCheck.CRITICAL,\n tags=service_check_tags)\n raise\n else:\n self.service_check(\n self.SERVICE_CHECK_NAME,\n AgentCheck.OK,\n tags=service_check_tags)\n\n if status['ok'] == 0:\n raise Exception(status['errmsg'].__str__())\n\n ops = db.current_op()\n status['fsyncLocked'] = 1 if ops.get('fsyncLock') else 0\n\n status['stats'] = db.command('dbstats')\n dbstats = {}\n dbstats[db_name] = {'stats': status['stats']}\n\n # Handle replica data, if any\n # See\n # http://www.mongodb.org/display/DOCS/Replica+Set+Commands#ReplicaSetCommands-replSetGetStatus # noqa\n try:\n data = {}\n dbnames = []\n\n replSet = admindb.command('replSetGetStatus')\n if replSet:\n primary = None\n current = None\n\n # need a new connection to deal with replica sets\n setname = replSet.get('set')\n cli_rs = pymongo.mongo_client.MongoClient(\n server,\n socketTimeoutMS=timeout,\n connectTimeoutMS=timeout,\n serverSelectionTimeoutMS=timeout,\n replicaset=setname,\n read_preference=pymongo.ReadPreference.NEAREST,\n **ssl_params)\n\n if do_auth:\n if auth_source:\n self._authenticate(cli_rs[auth_source], username, password, use_x509, server, service_check_tags)\n else:\n self._authenticate(cli_rs[db_name], username, password, use_x509, server, service_check_tags)\n\n # Replication set information\n replset_name = replSet['set']\n replset_state = self.get_state_name(replSet['myState']).lower()\n\n tags.extend([\n u\"replset_name:{0}\".format(replset_name),\n u\"replset_state:{0}\".format(replset_state),\n ])\n\n # Find nodes: master and current node (ourself)\n for member in replSet.get('members'):\n if member.get('self'):\n current = member\n if int(member.get('state')) == 1:\n primary = member\n\n # Compute a lag time\n if current is not None and primary is not None:\n if 'optimeDate' in primary and 'optimeDate' in current:\n lag = primary['optimeDate'] - 
current['optimeDate']\n data['replicationLag'] = total_seconds(lag)\n\n if current is not None:\n data['health'] = current['health']\n\n data['state'] = replSet['myState']\n\n if current is not None:\n total = 0.0\n cfg = cli_rs['local']['system.replset'].find_one()\n for member in cfg.get('members'):\n total += member.get('votes', 1)\n if member['_id'] == current['_id']:\n data['votes'] = member.get('votes', 1)\n data['voteFraction'] = data['votes'] / total\n\n status['replSet'] = data\n\n # Submit events\n self._report_replica_set_state(\n data['state'], clean_server_name, replset_name, self.agentConfig\n )\n\n except Exception as e:\n if \"OperationFailure\" in repr(e) and \"not running with --replSet\" in str(e):\n pass\n else:\n raise e\n\n # If these keys exist, remove them for now as they cannot be serialized\n try:\n status['backgroundFlushing'].pop('last_finished')\n except KeyError:\n pass\n try:\n status.pop('localTime')\n except KeyError:\n pass\n\n dbnames = cli.database_names()\n self.gauge('mongodb.dbs', len(dbnames), tags=tags)\n\n for db_n in dbnames:\n db_aux = cli[db_n]\n dbstats[db_n] = {'stats': db_aux.command('dbstats')}\n\n # Go through the metrics and save the values\n for metric_name in metrics_to_collect:\n # each metric is of the form: x.y.z with z optional\n # and can be found at status[x][y][z]\n value = status\n\n if metric_name.startswith('stats'):\n continue\n else:\n try:\n for c in metric_name.split(\".\"):\n value = value[c]\n except KeyError:\n continue\n\n # value is now status[x][y][z]\n if not isinstance(value, (int, long, float)):\n raise TypeError(\n u\"{0} value is a {1}, it should be an int, a float or a long instead.\"\n .format(metric_name, type(value)))\n\n # Submit the metric\n submit_method, metric_name_alias = self._resolve_metric(metric_name, metrics_to_collect)\n submit_method(self, metric_name_alias, value, tags=tags)\n\n for st, value in dbstats.iteritems():\n for metric_name in metrics_to_collect:\n if not metric_name.startswith('stats.'):\n continue\n\n try:\n val = value['stats'][metric_name.split('.')[1]]\n except KeyError:\n continue\n\n # value is now status[x][y][z]\n if not isinstance(val, (int, long, float)):\n raise TypeError(\n u\"{0} value is a {1}, it should be an int, a float or a long instead.\"\n .format(metric_name, type(val))\n )\n\n # Submit the metric\n metrics_tags = (\n tags +\n [\n u\"cluster:db:{0}\".format(st), # FIXME 6.0 - keep for backward compatibility\n u\"db:{0}\".format(st),\n ]\n )\n\n submit_method, metric_name_alias = \\\n self._resolve_metric(metric_name, metrics_to_collect)\n submit_method(self, metric_name_alias, val, tags=metrics_tags)\n\n if _is_affirmative(instance.get('collections_indexes_stats')):\n mongo_version = cli.server_info().get('version', '0.0')\n if LooseVersion(mongo_version) >= LooseVersion(\"3.2\"):\n self._collect_indexes_stats(instance, db, tags)\n else:\n self.log.error(\"'collections_indexes_stats' is only available starting from mongo 3.2: your mongo version is %s\", mongo_version)\n\n # Report the usage metrics for dbs/collections\n if 'top' in additional_metrics:\n try:\n dbtop = db.command('top')\n for ns, ns_metrics in dbtop['totals'].iteritems():\n if \".\" not in ns:\n continue\n\n # configure tags for db name and collection name\n dbname, collname = ns.split(\".\", 1)\n ns_tags = tags + [\"db:%s\" % dbname, \"collection:%s\" % collname]\n\n # iterate over DBTOP metrics\n for m in self.TOP_METRICS:\n # each metric is of the form: x.y.z with z optional\n # and can be found 
at ns_metrics[x][y][z]\n value = ns_metrics\n try:\n for c in m.split(\".\"):\n value = value[c]\n except Exception:\n continue\n\n # value is now status[x][y][z]\n if not isinstance(value, (int, long, float)):\n raise TypeError(\n u\"{0} value is a {1}, it should be an int, a float or a long instead.\"\n .format(m, type(value))\n )\n\n # Submit the metric\n submit_method, metric_name_alias = \\\n self._resolve_metric(m, metrics_to_collect, prefix=\"usage\")\n submit_method(self, metric_name_alias, value, tags=ns_tags)\n except Exception as e:\n self.log.warning('Failed to record `top` metrics %s' % str(e))\n\n\n if 'local' in dbnames: # it might not be if we are connectiing through mongos\n # Fetch information analogous to Mongo's db.getReplicationInfo()\n localdb = cli['local']\n\n oplog_data = {}\n\n for ol_collection_name in (\"oplog.rs\", \"oplog.$main\"):\n ol_options = localdb[ol_collection_name].options()\n if ol_options:\n break\n\n if ol_options:\n try:\n oplog_data['logSizeMB'] = round(\n ol_options['size'] / 2.0 ** 20, 2\n )\n\n oplog = localdb[ol_collection_name]\n\n oplog_data['usedSizeMB'] = round(\n localdb.command(\"collstats\", ol_collection_name)['size'] / 2.0 ** 20, 2\n )\n\n op_asc_cursor = oplog.find({\"ts\": {\"$exists\": 1}}).sort(\"$natural\", pymongo.ASCENDING).limit(1)\n op_dsc_cursor = oplog.find({\"ts\": {\"$exists\": 1}}).sort(\"$natural\", pymongo.DESCENDING).limit(1)\n\n try:\n first_timestamp = op_asc_cursor[0]['ts'].as_datetime()\n last_timestamp = op_dsc_cursor[0]['ts'].as_datetime()\n oplog_data['timeDiff'] = total_seconds(last_timestamp - first_timestamp)\n except (IndexError, KeyError):\n # if the oplog collection doesn't have any entries\n # if an object in the collection doesn't have a ts value, we ignore it\n pass\n except KeyError:\n # encountered an error trying to access options.size for the oplog collection\n self.log.warning(u\"Failed to record `ReplicationInfo` metrics.\")\n\n for (m, value) in oplog_data.iteritems():\n submit_method, metric_name_alias = \\\n self._resolve_metric('oplog.%s' % m, metrics_to_collect)\n submit_method(self, metric_name_alias, value, tags=tags)\n\n else:\n self.log.debug('\"local\" database not in dbnames. 
Not collecting ReplicationInfo metrics')\n\n # get collection level stats\n try:\n # Ensure that you're on the right db\n db = cli[db_name]\n # grab the collections from the configutation\n coll_names = instance.get('collections', [])\n # loop through the collections\n for coll_name in coll_names:\n # grab the stats from the collection\n stats = db.command(\"collstats\", coll_name)\n # loop through the metrics\n for m in self.collection_metrics_names:\n coll_tags = tags + [\"db:%s\" % db_name, \"collection:%s\" % coll_name]\n value = stats.get(m, None)\n if not value:\n continue\n\n # if it's the index sizes, then it's a dict.\n if m == 'indexSizes':\n submit_method, metric_name_alias = \\\n self._resolve_metric('collection.%s' % m, self.COLLECTION_METRICS)\n # loop through the indexes\n for (idx, val) in value.iteritems():\n # we tag the index\n idx_tags = coll_tags + [\"index:%s\" % idx]\n submit_method(self, metric_name_alias, val, tags=idx_tags)\n else:\n submit_method, metric_name_alias = \\\n self._resolve_metric('collection.%s' % m, self.COLLECTION_METRICS)\n submit_method(self, metric_name_alias, value, tags=coll_tags)\n except Exception as e:\n self.log.warning(u\"Failed to record `collection` metrics.\")\n self.log.exception(e)", "def test_additive_instance_creation(self):\n\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import launch_instances, run_block_device_dict, farm_security_group_setup\n\n # launch_instances requires vpc setup as done by firesim/scripts/setup_firesim.py\n from awstools.aws_setup import aws_setup\n aws_setup()\n farm_security_group_setup()\n\n type = 'f1.2xlarge'\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n always_expand=True)\n instances.should.have.length_of(1)\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n always_expand=True)\n instances.should.have.length_of(1)\n\n # There should be two instances total now, across two reservations\n ec2_client = boto3.client('ec2')\n\n paginator = ec2_client.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n all_reservations.should.have.length_of(2)\n\n [i for r in all_reservations for i in r['Instances']].should.have.length_of(2)", "def reconfiguration_scalability(self):\n\n self.check_run('reconfiguration_scalability')\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"reconfigure_nova_ephemeral_disk\")\n\n self.show_step(2)\n cluster_id = self.fuel_web.get_last_created_cluster()\n config = utils.get_config_template('nova_disk')\n structured_config_nova = get_structured_config_dict(config)\n config = utils.get_config_template('keystone')\n structured_config_keystone = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n cluster_id,\n role='controller')\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n\n self.show_step(3)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role='controller')\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(4)\n 
self.check_config_on_remote(controllers, structured_config_keystone)\n\n self.show_step(5)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n time_expiration = config[\n 'keystone_config']['token/expiration']['value']\n self.check_token_expiration(os_conn, time_expiration)\n\n self.show_step(6)\n bs_nodes = [x for x in self.env.d_env.get_nodes()\n if x.name == 'slave-05' or x.name == 'slave-06']\n self.env.bootstrap_nodes(bs_nodes)\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-05': ['compute', 'cinder']})\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-06': ['controller']})\n\n self.show_step(7)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(8)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(9)\n self.fuel_web.run_ostf(cluster_id=cluster_id)\n\n self.show_step(10)\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['compute'])\n target_controller = [x for x in controllers\n if 'slave-06' in x['name']]\n target_compute = [x for x in computes\n if 'slave-05' in x['name']]\n self.check_config_on_remote(target_controller,\n structured_config_keystone)\n\n self.show_step(11)\n self.check_config_on_remote(target_compute, structured_config_nova)\n\n self.show_step(12)\n self.show_step(13)\n self.show_step(14)\n self.show_step(15)\n self.show_step(16)\n\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n hypervisor_name = target_compute[0]['fqdn']\n self.check_nova_ephemeral_disk(os_conn, cluster_id,\n hypervisor_name=hypervisor_name)\n\n self.show_step(17)\n self.check_token_expiration(os_conn, time_expiration)\n\n self.env.make_snapshot(\"reconfiguration_scalability\", is_make=True)", "def scale(options):\n\n # ONLY GCE is supported for scaling at this time\n cluster = gce_cluster_control(options)\n if options.test_k8s:\n k8s = k8s_control_test(options)\n else:\n k8s = k8s_control(options)\n\n slack_logger.addHandler(slack_handler(options.slack_token))\n if not options.slack_token:\n scale_logger.info(\n \"No message will be sent to slack, since there is no token provided\")\n\n scale_logger.info(\"Scaling on cluster %s\", k8s.get_cluster_name())\n\n nodes = [] # a list of nodes that are NOT critical\n for node in k8s.nodes:\n if node.metadata.name not in k8s.critical_node_names:\n nodes.append(node)\n\n # Shuffle the node list so that when there are multiple nodes\n # with same number of pods, they will be randomly picked to\n # be made unschedulable\n random.shuffle(nodes)\n\n # goal is the total number of nodes we want in the cluster\n goal = schedule_goal(k8s, options)\n\n scale_logger.info(\"Total nodes in the cluster: %i\", len(k8s.nodes))\n scale_logger.info(\n \"%i nodes are unschedulable at this time\", k8s.get_num_schedulable())\n scale_logger.info(\"Found %i critical nodes\",\n len(k8s.nodes) - len(nodes))\n scale_logger.info(\"Recommending total %i nodes for service\", goal)\n\n if confirm((\"Updating unschedulable flags to ensure %i nodes are unschedulable\" % max(len(k8s.nodes) - goal, 0))):\n update_unschedulable(max(len(k8s.nodes) - goal, 0), nodes, k8s)\n\n if goal > len(k8s.nodes):\n scale_logger.info(\n \"Resize the cluster to %i nodes to satisfy the demand\", goal)\n if options.test_cloud:\n resize_for_new_nodes_test(goal, k8s, cluster)\n else:\n slack_logger.info(\n \"Cluster resized to %i nodes to satisfy the demand\", goal)\n 
resize_for_new_nodes(goal, k8s, cluster)\n if options.test_cloud:\n shutdown_empty_nodes_test(nodes, k8s, cluster)\n else:\n # CRITICAL NODES SHOULD NOT BE SHUTDOWN\n shutdown_empty_nodes(nodes, k8s, cluster)", "def check_if_cluster_was_upgraded():\n return True if \"replaces\" in get_ocs_csv().get().get(\"spec\") else False", "def dvs_instances_one_group(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(2)\n self.show_step(3)\n openstack.create_instances(os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n vm_count=vm_count,\n security_groups=[security_group.name])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(4)\n srv_list = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, srv_list)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair)\n\n self.show_step(5)\n for srv in srv_list:\n os_conn.nova.servers.delete(srv)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(srv)", "def test_list_cluster_resource_quota(self):\n pass", "def modify_rds_security_group(payload):\n # version = payload.get(\"version\")\n rds_ids = payload.pop(\"resource_id\")\n sg_ids = payload.pop(\"sg_id\")\n apply_action = \"GrantSecurityGroup\"\n remove_action = \"RemoveSecurityGroup\"\n check_instance_security_action = \"DescribeSecurityGroupByInstance\"\n version = payload.get(\"version\")\n result_data = {}\n\n succ, resp = get_ha_rds_backend_instance_info(payload)\n if not succ:\n return resp\n rds_2_instance = {rds: instance for instance, rds in resp.items()}\n\n if len(sg_ids) > 1:\n return console_response(\n SecurityErrorCode.ONE_SECURITY_PER_INSTANCE_ERROR, \"modify failed\")\n sg_id = sg_ids[0]\n\n code = 0\n msg = 'Success'\n for rds_id in rds_ids:\n sg_results_succ = []\n sg = RdsSecurityGroupModel.get_security_by_id(sg_id=sg_id)\n sg_uuid = sg.uuid\n visible_rds_record = RdsModel.get_rds_by_id(rds_id=rds_id)\n if visible_rds_record.rds_type == 'ha':\n rds_group = visible_rds_record.rds_group\n rds_records = RdsModel.get_rds_records_by_group(rds_group)\n else:\n rds_records = []\n for rds_record in rds_records:\n rds_ins_uuid = rds_2_instance.get(rds_record.uuid)\n\n payload.update(\n {\"action\": check_instance_security_action, \"version\": version,\n \"server\": rds_ins_uuid})\n # check_resp = api.get(payload=payload, timeout=10)\n check_resp = api.get(payload=payload)\n if check_resp.get(\"code\") != 0:\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = check_resp.get(\"msg\")\n continue\n\n # if the instance already has a security group, remove it\n if check_resp[\"data\"][\"total_count\"] > 0:\n old_sg_uuid = check_resp[\"data\"][\"ret_set\"][0][\"id\"]\n payload.update({\"action\": remove_action, \"version\": version,\n \"server\": rds_ins_uuid,\n 
\"security_group\": old_sg_uuid})\n # remove_resp = api.get(payload=payload, timeout=10)\n remove_resp = api.get(payload=payload)\n if remove_resp.get(\"code\") != 0:\n logger.debug(\"the resp of removing the old securty group is:\" +\n str(remove_resp))\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = remove_resp.get(\"msg\")\n continue\n else:\n rds_record.sg = None\n rds_record.save()\n\n # grant the new security group to the instance\n payload.update({\"action\": apply_action, \"version\": version,\n \"server\": rds_ins_uuid, \"security_group\": sg_uuid})\n # grant_resp = api.get(payload=payload, timeout=10)\n grant_resp = api.get(payload=payload)\n\n if grant_resp.get(\"code\") != 0:\n logger.debug(\"the resp of granting the new securty group is:\" +\n str(grant_resp))\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = grant_resp.get(\"msg\")\n logger.error(\n \"security_group with sg_id \" + sg_id +\n \" cannot apply to rds with rds_id \" + rds_id)\n else:\n try:\n rds_record.sg = RdsSecurityGroupModel.\\\n get_security_by_id(sg_id)\n rds_record.save()\n except Exception as exp:\n logger.error(\"cannot save grant sg to rds to db, {}\".\n format(exp.message))\n else:\n sg_results_succ.append(sg_id)\n result_data.update({rds_id: sg_results_succ})\n resp = console_response(code, msg, len(result_data.keys()), [result_data])\n return resp", "def test_instance_too_small_aws():\n args = argparse.Namespace(cfg=os.path.join(TEST_DATA_DIR, 'instance-too-small-aws.ini'))\n with pytest.raises(UserReportError) as err:\n cfg = ElasticBlastConfig(configure(args), task = ElbCommand.SUBMIT)\n cfg.validate()\n assert err.value.returncode == INPUT_ERROR\n print(err.value.message)\n assert 'does not have enough memory' in err.value.message", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n availability_zone: Optional[pulumi.Input[str]] = None,\n create_sample_data: Optional[pulumi.Input[bool]] = None,\n db_instance_category: Optional[pulumi.Input[str]] = None,\n db_instance_class: Optional[pulumi.Input[str]] = None,\n db_instance_mode: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n encryption_key: Optional[pulumi.Input[str]] = None,\n encryption_type: Optional[pulumi.Input[str]] = None,\n engine: Optional[pulumi.Input[str]] = None,\n engine_version: Optional[pulumi.Input[str]] = None,\n instance_charge_type: Optional[pulumi.Input[str]] = None,\n instance_group_count: Optional[pulumi.Input[int]] = None,\n instance_network_type: Optional[pulumi.Input[str]] = None,\n instance_spec: Optional[pulumi.Input[str]] = None,\n ip_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InstanceIpWhitelistArgs']]]]] = None,\n maintain_end_time: Optional[pulumi.Input[str]] = None,\n maintain_start_time: Optional[pulumi.Input[str]] = None,\n master_node_num: Optional[pulumi.Input[int]] = None,\n payment_type: Optional[pulumi.Input[str]] = None,\n period: Optional[pulumi.Input[str]] = None,\n private_ip_address: Optional[pulumi.Input[str]] = None,\n resource_group_id: Optional[pulumi.Input[str]] = None,\n security_ip_lists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n seg_node_num: Optional[pulumi.Input[int]] = None,\n seg_storage_type: Optional[pulumi.Input[str]] = None,\n ssl_enabled: Optional[pulumi.Input[int]] = None,\n storage_size: Optional[pulumi.Input[int]] = None,\n tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n used_time: Optional[pulumi.Input[str]] = None,\n 
vector_configuration_status: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n vswitch_id: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def reserved_compare(options):\n running_instances = defaultdict(dict)\n reserved_purchases = defaultdict(dict)\n regions = boto.ec2.regions()\n good_regions = [r for r in regions if r.name not in ['us-gov-west-1',\n 'cn-north-1']]\n for region in good_regions:\n if options.trace:\n print \" Scanning region {0}\".format(region.name)\n conn = region.connect()\n filters = {'instance-state-name': 'running'}\n zones = defaultdict(dict)\n\n if options.trace:\n print \" Fetching running instances\"\n reservations = conn.get_all_instances(filters=filters)\n for reservation in reservations:\n for inst in reservation.instances:\n if options.debug:\n print instance_string(inst, options, verbose=True)\n if inst.state != 'running':\n if options.debug:\n print \"Skip {0.id} state {0.state}\".format(inst)\n continue\n if inst.spot_instance_request_id:\n if options.debug:\n print \"Skip {0.id} has spot id {0.spot_instance_request_id}\".format(inst)\n continue\n if 'aws:autoscaling:groupName' in inst.tags:\n if options.debug:\n print \"Skip {0.id} is an autoscale instance\".format(inst)\n continue\n if inst.platform == 'Windows' or inst.platform == 'windows':\n if options.debug:\n print \"Skip {0.id} has platform {0.platform}\".format(inst)\n continue\n if inst.instance_type not in zones[inst.placement]:\n zones[inst.placement][inst.instance_type] = []\n zones[inst.placement][inst.instance_type].append(inst)\n\n if zones:\n running_instances[region.name] = zones\n\n purchased = defaultdict(dict)\n if options.trace:\n print \" Fetching reservations\"\n\n reserved = conn.get_all_reserved_instances()\n for r in reserved:\n if options.debug:\n print reservation_string(r, verbose=True)\n if r.state != 'active':\n continue\n if r.instance_tenancy != 'default':\n print 'WARNING: Non-default tenancy %s: %s' % (r.instance_tenancy, reservation_string(r))\n continue\n if r.instance_type not in purchased[r.availability_zone]:\n purchased[r.availability_zone][r.instance_type] = [r]\n else:\n purchased[r.availability_zone][r.instance_type].append(r)\n\n if purchased:\n reserved_purchases[region.name] = purchased\n\n return check_reservation_use(options, running_instances,\n reserved_purchases)", "def check_available():\n\n rm = current_app.config['rm_object']\n\n return rm.check_availability()", "def xtest_instance_api_negative(self):\n\n # Test creating a db instance.\n LOG.info(\"* Creating db instance\")\n body = r\"\"\"\n {\"instance\": {\n \"name\": \"dbapi_test\",\n \"flavorRef\": \"medium\",\n \"port\": \"3306\",\n \"dbtype\": {\n \"name\": \"mysql\",\n \"version\": \"5.5\"\n }\n }\n }\"\"\"\n\n req = httplib2.Http(\".cache\")\n resp, content = req.request(API_URL + \"instances\", \"POST\", body, AUTH_HEADER)\n LOG.debug(content)\n content = json.loads(content)\n LOG.debug(resp)\n LOG.debug(content)\n\n self.instance_id = content['instance']['id']\n LOG.debug(\"Instance ID: %s\" % self.instance_id)\n\n # Assert 1) that the request was accepted and 2) that the response\n # is in the expected format.\n self.assertEqual(201, resp.status)\n self.assertTrue(content.has_key('instance'))\n\n\n # Test creating an instance without a body in the request.\n LOG.info(\"* Creating an instance without a body\")\n resp, content = req.request(API_URL + \"instances\", \"POST\", \"\", 
AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n\n # Test creating an instance with a malformed body.\n LOG.info(\"* Creating an instance with a malformed body\")\n resp, content = req.request(API_URL + \"instances\", \"POST\", r\"\"\"{\"instance\": {}}\"\"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(500, resp.status)\n \n # Test listing all db instances with a body in the request.\n LOG.info(\"* Listing all db instances with a body\")\n resp, content = req.request(API_URL + \"instances\", \"GET\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n \n # Test getting a specific db instance with a body in the request.\n LOG.info(\"* Getting instance %s with a body in the request\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id, \"GET\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n\n # Test getting a non-existent db instance.\n LOG.info(\"* Getting dummy instance\")\n resp, content = req.request(API_URL + \"instances/dummy\", \"GET\", \"\", AUTH_HEADER)\n content = json.loads(content)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n\n # Test immediately resetting the password on a db instance with a body in the request.\n LOG.info(\"* Resetting password on instance %s with a body in the request\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id + \"/resetpassword\", \"POST\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n \n\n # Test resetting the password on a db instance for a non-existent instance\n LOG.info(\"* Resetting password on dummy instance\")\n resp, content = req.request(API_URL + \"instances/dummy/resetpassword\", \"POST\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n \n # Test restarting a db instance for a non-existent instance\n LOG.info(\"* Restarting dummy instance\")\n resp, content = req.request(API_URL + \"instances/dummy/restart\", \"POST\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n\n # Test immediately restarting a db instance with a body in the request.\n LOG.info(\"* Restarting instance %s\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id + \"/restart\", \"POST\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n\n # Test deleting an instance with a body in the request.\n LOG.info(\"* Testing delete of instance %s with a body in the request\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id, \"DELETE\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n # Test that trying to delete an already deleted instance returns\n # the proper error code.\n LOG.info(\"* Testing re-delete of instance %s\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id, \"DELETE\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)", "def create(self):\n raise WufooException(\"InstanceResource creation not supported\")", "def reconfigure_nova_quota(self):\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"basic_env_for_reconfiguration\")\n\n cluster_id = 
self.fuel_web.get_last_created_cluster()\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n\n self.show_step(2)\n config = utils.get_config_template('nova_quota')\n structured_config = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n cluster_id,\n role=\"controller\")\n\n self.show_step(3)\n uptimes = self.get_service_uptime(controllers, 'nova-api')\n\n self.show_step(4)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role=\"controller\")\n\n self.show_step(5)\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(6)\n self.check_service_was_restarted(controllers, uptimes, 'nova-api')\n\n self.show_step(7)\n self.check_config_on_remote(controllers, structured_config)\n\n self.show_step(8)\n self.show_step(9)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n\n self.check_nova_quota(os_conn, cluster_id)\n\n self.env.make_snapshot(\"reconfigure_nova_quota\")", "def create_cluster(rs):\n\n rs.create_cluster(verbose=False)\n print('Creating cluster. Will check every 30 seconds for completed creation.')\n cluster_built = False\n while not cluster_built:\n print('Sleeping 30 seconds.')\n time.sleep(30)\n cluster_built = check_available(rs)", "def check_availability(self):\n pass", "def replace_with_insufficient_replicas_test(self):\n debug(\"Starting cluster with 3 nodes.\")\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n if DISABLE_VNODES:\n num_tokens = 1\n else:\n # a little hacky but grep_log returns the whole line...\n num_tokens = int(node3.get_conf_option('num_tokens'))\n\n debug(\"testing with num_tokens: {}\".format(num_tokens))\n\n debug(\"Inserting Data...\")\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=2)'])\n\n # stop node to replace\n debug(\"Stopping node to replace.\")\n node3.stop(wait_other_notice=True)\n\n # stop other replica\n debug(\"Stopping node2 (other replica)\")\n node2.stop(wait_other_notice=True)\n\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n cluster.add(node4, False)\n node4.start(replace_address='127.0.0.3', wait_for_binary_proto=False, wait_other_notice=False)\n\n # replace should fail due to insufficient replicas\n node4.watch_log_for(\"Unable to find sufficient sources for streaming range\")\n assert_not_running(node4)", "def check_elb_instance_health(elb_name, instances):\n if_verbose(\"Checking ELB %s instance health for %s\" % (elb_name, instances))\n timer = time.time()\n while (True):\n if_verbose(\"Sleeping for %d ELB instance health\" % args.update_timeout)\n time.sleep(args.update_timeout)\n\n if int(time.time() - timer) >= args.health_check_timeout:\n return \"Health check timer expired. 
A manual clean up is likely.\"\n\n healthy_elb_instances = 0\n elb_instances = elb.describe_instance_health(LoadBalancerName=elb_name, Instances=instances)\n for instance in elb_instances[\"InstanceStates\"]:\n if_verbose(\"Progress of ELB instance %s: %s\" % (instance[\"InstanceId\"], instance[\"State\"]))\n\n if instance[\"State\"] == \"InService\":\n healthy_elb_instances += 1\n\n if healthy_elb_instances == len(instances):\n break\n else:\n healthy_elb_instances = 0\n\n if_verbose(\"ELB %s is healthy with instances %s\" % (elb_name, elb_instances))\n return None", "def scale_up_autoscaling_group(asg_name, instance_count):\n if_verbose(\"Scaling up ASG %s to %d instances\" % (asg_name, instance_count))\n asg.set_desired_capacity(AutoScalingGroupName=asg_name, DesiredCapacity=instance_count)\n \n activities = []\n timer = time.time()\n while(True):\n if_verbose(\"Sleeping for %d seconds whilst waiting for activities to come active\" % args.update_timeout)\n time.sleep(args.update_timeout)\n\n if int(time.time() - timer) >= args.health_check_timeout:\n return \"Health check timer expired on activities listing. A manual clean up is likely.\"\n\n activities = asg.describe_scaling_activities(AutoScalingGroupName=asg_name, MaxRecords=args.instance_count_step) \n \n if len(activities[\"Activities\"]) == args.instance_count_step:\n break\n\n activity_ids = [a[\"ActivityId\"] for a in activities[\"Activities\"]]\n\n if not len(activity_ids) > 0:\n return \"No activities found\" \n \n if_verbose(\"Activities found, checking them until complete or %ds timer expires\" % args.health_check_timeout)\n timer = time.time()\n while(True):\n if_verbose(\"Sleeping for %d seconds whilst waiting for activities to complete\" % args.update_timeout)\n time.sleep(args.update_timeout)\n \n if int(time.time() - timer) >= args.health_check_timeout:\n return \"Health check timer expired on activities check. 
A manual clean up is likely.\"\n\n completed_activities = 0\n activity_statuses = asg.describe_scaling_activities(ActivityIds=activity_ids, AutoScalingGroupName=asg_name, MaxRecords=args.instance_count_step)\n for activity in activity_statuses[\"Activities\"]:\n if_verbose(\"Progress of activity ID %s: %d\" % (activity[\"ActivityId\"], activity[\"Progress\"]))\n\n if activity[\"Progress\"] == 100:\n completed_activities += 1\n\n if completed_activities >= args.instance_count_step:\n break\n else:\n completed_activities = 0\n\n if_verbose(\"Scaling up of ASG %s successful\" % asg_name)\n return None", "def test_create_instance_with_oversubscribed_ram_fail(self):\n self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)\n self.rt.update_available_resource(self.context.elevated(), NODENAME)\n\n # get total memory as reported by virt driver:\n resources = self.compute.driver.get_available_resource(NODENAME)\n total_mem_mb = resources['memory_mb']\n\n oversub_limit_mb = total_mem_mb * 1.5\n instance_mb = int(total_mem_mb * 1.55)\n\n # build an instance, specifying an amount of memory that exceeds\n # both total_mem_mb and the oversubscribed limit:\n params = {\"flavor\": {\"memory_mb\": instance_mb, \"root_gb\": 128,\n \"ephemeral_gb\": 128}}\n instance = self._create_fake_instance_obj(params)\n\n filter_properties = {'limits': {'memory_mb': oversub_limit_mb}}\n\n self.compute.build_and_run_instance(self.context, instance,\n {}, {}, filter_properties, [],\n block_device_mapping=[])", "def wait_for_instances(client, asg, desired_state=None, desired_health=None,\n desired_count=None):\n for i in range(61):\n if i == 60:\n raise Exception('Tried for 5 minutes, giving up.')\n sleep(10)\n _asg = client.describe_auto_scaling_groups(\n AutoScalingGroupNames=[asg['AutoScalingGroupName']],\n )['AutoScalingGroups'][0]\n\n if(\n desired_count is not None and\n len(_asg['Instances']) < desired_count\n ):\n continue\n\n # Check instance states\n all_matching = True\n for instance in _asg['Instances']:\n if(\n desired_state is not None and\n instance['LifecycleState'] != desired_state\n ):\n all_matching = False\n break\n if(\n desired_health is not None and\n instance['HealthStatus'] != desired_health\n ):\n all_matching = False\n break\n if all_matching:\n break", "def is_asg_scaled(asg_name, desired_capacity):\n logger.info('Checking asg {} instance count...'.format(asg_name))\n response = client.describe_auto_scaling_groups(\n AutoScalingGroupNames=[asg_name], MaxRecords=1\n )\n actual_instances = response['AutoScalingGroups'][0]['Instances']\n if len(actual_instances) != desired_capacity:\n logger.info('Asg {} does not have enough running instances to proceed'.format(asg_name))\n logger.info('Actual instances: {} Desired instances: {}'.format(\n len(actual_instances),\n desired_capacity)\n )\n is_scaled = False\n else:\n logger.info('Asg {} scaled OK'.format(asg_name))\n logger.info('Actual instances: {} Desired instances: {}'.format(\n len(actual_instances),\n desired_capacity)\n )\n is_scaled = True\n return is_scaled", "def test_live_migration_src_check_compute_node_not_alive(self):\n instance_id = self._create_instance()\n i_ref = db.instance_get(self.context, instance_id)\n t = utils.utcnow() - datetime.timedelta(10)\n s_ref = self._create_compute_service(created_at=t, updated_at=t,\n host=i_ref['host'])\n\n self.assertRaises(exception.ComputeServiceUnavailable,\n self.scheduler.driver._live_migration_src_check,\n self.context, i_ref)\n\n db.instance_destroy(self.context, instance_id)\n 
db.service_destroy(self.context, s_ref['id'])", "def check_pacemaker_resource(self, resource_name, role, is_ha=True):\n n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n self.cluster_id, [role])\n d_ctrls = self.fuel_web.get_devops_nodes_by_nailgun_nodes(n_ctrls)\n pcm_nodes = ' '.join(self.fuel_web.get_pcm_nodes(\n d_ctrls[0].name, pure=True)['Online'])\n logger.info(\"pacemaker nodes are {0}\".format(pcm_nodes))\n resource_nodes = self.fuel_web.get_pacemaker_resource_location(\n d_ctrls[0].name, \"{}\".format(resource_name))\n if is_ha:\n for resource_node in resource_nodes:\n logger.info(\"Check resource [{0}] on node {1}\".format(\n resource_name, resource_node.name))\n config = self.fuel_web.get_pacemaker_config(resource_node.name)\n asserts.assert_not_equal(\n re.search(\n \"Clone Set: clone_{0} \\[{0}\\]\\s+Started: \\[ {1} \\]\".\n format(resource_name, pcm_nodes), config), None,\n 'Resource [{0}] is not properly configured'.format(\n resource_name))\n else:\n asserts.assert_true(len(resource_nodes), 1)\n config = self.fuel_web.get_pacemaker_config(resource_nodes[0].name)\n logger.info(\"Check resource [{0}] on node {1}\".format(\n resource_name, resource_nodes[0].name))\n asserts.assert_not_equal(\n re.search(\"{0}\\s+\\(ocf::fuel:{1}\\):\\s+Started\".format(\n resource_name, resource_name.split(\"_\")[1]), config), None,\n 'Resource [{0}] is not properly configured'.format(\n resource_name))", "def check_service_replication(\n instance_config,\n all_tasks,\n smartstack_replication_checker,\n):\n expected_count = instance_config.get_instances()\n log.info(\"Expecting %d total tasks for %s\" % (expected_count, instance_config.job_id))\n proxy_port = marathon_tools.get_proxy_port_for_instance(\n name=instance_config.service,\n instance=instance_config.instance,\n cluster=instance_config.cluster,\n soa_dir=instance_config.soa_dir,\n )\n\n registrations = instance_config.get_registrations()\n # if the primary registration does not match the service_instance name then\n # the best we can do is check marathon for replication (for now).\n if proxy_port is not None and registrations[0] == instance_config.job_id:\n check_smartstack_replication_for_instance(\n instance_config=instance_config,\n expected_count=expected_count,\n smartstack_replication_checker=smartstack_replication_checker,\n )\n else:\n check_healthy_marathon_tasks_for_service_instance(\n instance_config=instance_config,\n expected_count=expected_count,\n all_tasks=all_tasks,\n )", "def health_ok(self):\n for client in self.clients():\n if client.run_cmd('ls'):\n log.info('Vmware cluster is up.')\n return True\n else:\n return False", "def _estimate_elasticsearch_requirement(\n instance: Instance,\n desires: CapacityDesires,\n working_set: float,\n reads_per_second: float,\n max_rps_to_disk: int,\n zones_per_region: int = 3,\n copies_per_region: int = 3,\n) -> CapacityRequirement:\n # Keep half of the cores free for background work (merging mostly)\n needed_cores = math.ceil(sqrt_staffed_cores(desires) * 1.5)\n # Keep half of the bandwidth available for backup\n needed_network_mbps = simple_network_mbps(desires) * 2\n\n needed_disk = math.ceil(\n (1.0 / desires.data_shape.estimated_compression_ratio.mid)\n * desires.data_shape.estimated_state_size_gib.mid\n * copies_per_region,\n )\n\n # Rough estimate of how many instances we would need just for the the CPU\n # Note that this is a lower bound, we might end up with more.\n needed_cores = math.ceil(\n max(1, needed_cores // (instance.cpu_ghz / 
desires.core_reference_ghz))\n )\n rough_count = math.ceil(needed_cores / instance.cpu)\n\n # Generally speaking we want fewer than some number of reads per second\n # hitting disk per instance. If we don't have many reads we don't need to\n # hold much data in memory.\n instance_rps = max(1, reads_per_second // rough_count)\n disk_rps = instance_rps * _es_io_per_read(max(1, needed_disk // rough_count))\n rps_working_set = min(1.0, disk_rps / max_rps_to_disk)\n\n # If disk RPS will be smaller than our target because there are no\n # reads, we don't need to hold as much data in memory\n needed_memory = min(working_set, rps_working_set) * needed_disk\n\n # Now convert to per zone\n needed_cores = needed_cores // zones_per_region\n needed_disk = needed_disk // zones_per_region\n needed_memory = int(needed_memory // zones_per_region)\n logger.debug(\n \"Need (cpu, mem, disk, working) = (%s, %s, %s, %f)\",\n needed_cores,\n needed_memory,\n needed_disk,\n working_set,\n )\n\n return CapacityRequirement(\n requirement_type=\"elasticsearch-data-zonal\",\n core_reference_ghz=desires.core_reference_ghz,\n cpu_cores=certain_int(needed_cores),\n mem_gib=certain_float(needed_memory),\n disk_gib=certain_float(needed_disk),\n network_mbps=certain_float(needed_network_mbps),\n context={\n \"working_set\": min(working_set, rps_working_set),\n \"rps_working_set\": rps_working_set,\n \"disk_slo_working_set\": working_set,\n \"replication_factor\": copies_per_region,\n \"compression_ratio\": round(\n 1.0 / desires.data_shape.estimated_compression_ratio.mid, 2\n ),\n \"read_per_second\": reads_per_second,\n },\n )", "def test_04_verify_upgraded_ipv6_network_redundant(self):\n\n self.createIpv4NetworkOffering(True)\n self.createIpv6NetworkOfferingForUpdate(True)\n self.createTinyServiceOffering()\n self.prepareRoutingTestResourcesInBackground()\n self.deployNetwork()\n self.deployNetworkVm()\n self.updateNetworkWithOffering()\n self.checkIpv6Network()\n self.checkIpv6NetworkVm()\n self.restartNetworkWithCleanup()\n self.checkIpv6Network()\n self.checkIpv6NetworkRouting()\n self.checkIpv6FirewallRule()\n self.checkNetworkVRRedundancy()", "def waitForInstanceToRun(instance):\n while True:\n try:\n instance.update()\n break\n except EC2ResponseError:\n continue\n\n for trial in range(0, NUM_RETRY_ATTEMPTS):\n if instance.update() == u'running':\n break\n elif trial == NUM_RETRY_ATTEMPTS-1:\n raise RuntimeError(\"AWS instance failed to startup after %d \" \\\n \"re-checks\" % NUM_RETRY_ATTEMPTS)\n else:\n time.sleep(1)", "def test_upgrade_plan_all_fine(setup, skuba):\n\n setup_kubernetes_version(skuba)\n out = skuba.cluster_upgrade_plan()\n\n assert out.find(\n \"Congratulations! 
You are already at the latest version available\"\n ) != -1", "def __run_instances(self, number=1, policies={}):\n try:\n self.euca = Euca2ool('k:n:t:g:d:f:z:',\n ['key=', 'kernel=', 'ramdisk=', 'instance-count=', 'instance-type=',\n 'group=', 'user-data=', 'user-data-file=', 'addressing=', 'availability-zone='])\n except Exception, ex:\n sys.exit(1)\n\n instance_type = policies.get('instance_type') or 'm1.small'\n image_id = policies.get('image_id') or self.__get_image_id()[0]\n min_count = number\n max_count = number\n keyname = 'mykey'\n \n kernel_id = None\n ramdisk_id = None\n group_names = []\n user_data = None\n addressing_type = None\n zone = None\n user_data = None\n \n if image_id:\n euca_conn = self.__make_connection()\n try:\n reservation = euca_conn.run_instances(image_id = image_id,\n min_count = min_count,\n max_count = max_count,\n key_name = keyname,\n security_groups = group_names,\n user_data = user_data,\n addressing_type = addressing_type,\n instance_type = instance_type,\n placement = zone,\n kernel_id = kernel_id,\n ramdisk_id = ramdisk_id)\n except Exception, ex:\n self.euca.display_error_and_exit('error:%s' % ex)\n return reservation\n return False", "def test_least_busy_host_gets_instance(self):\n s_ref = self._create_compute_service(host='host1')\n s_ref2 = self._create_compute_service(host='host2')\n instance_id1 = self._create_instance(host='host1')\n\n instance_id2 = self._create_instance()\n host = self.scheduler.driver.schedule_run_instance(self.context,\n instance_id2)\n self.assertEqual(host, 'host2')\n db.instance_destroy(self.context, instance_id2)\n db.instance_destroy(self.context, instance_id1)\n db.service_destroy(self.context, s_ref['id'])\n db.service_destroy(self.context, s_ref2['id'])", "def run_instance():\n ami_id = \"ami-04876f29fd3a5e8ba\" # AMI Id\n instance_type = \"t2.micro\" # Instance Type\n tag_specs = [\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': 'Name',\n 'Value': 'BoB10@ProductDev.sdk.8084'\n }\n ]\n }\n ]\n\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Run an instance\n\n instances = ec2_resource.create_instances(ImageId=ami_id, InstanceType=instance_type,\n MaxCount=1, MinCount=1, KeyName='BoB10@ProductDev.8084',\n TagSpecifications=tag_specs, \n SecurityGroupIds=['sg-06b757b4bb272d98f'],\n UserData=assemble_userdata().as_string())\n Instance_id = instances[0].id\n print('\\nInstance Id: ' + Instance_id)\n print('Image Id: ' + instances[0].image_id)\n print('Instance Type: ' + instances[0].instance_type)\n print('State: ' + instances[0].state['Name'])\n return Instance_id", "def test_instance_running(self) -> None:\n if self.prod_env:\n ec2_name = 'saints-xctf-server-prod-asg'\n else:\n ec2_name = 'saints-xctf-server-dev-asg'\n\n instances = self.get_ec2(ec2_name)\n self.assertTrue(len(instances) > 0)", "def tcp_ping_nodes(self, timeout=20.0):\n for node in self.all_instances:\n if node.instance_type in [\n InstanceType.RESILIENT_SINGLE,\n InstanceType.SINGLE,\n InstanceType.DBSERVER,\n ]:\n node.check_version_request(timeout)", "def step_impl(context, instance_number):\n num_try = 60\n interval = 10\n success = False\n\n for i in range(num_try):\n time.sleep(interval)\n context.service_instances = context.service.status()['replicaStatus']\n if len(context.service_instances) == int(instance_number):\n success = True\n break\n context.dl.logger.debug(\"Step is running for {:.2f}[s] and now Going to sleep {:.2f}[s]\".format((i + 1) * interval,\n 
interval))\n\n assert success, \"TEST FAILED: Expected {}, Got {}\".format(instance_number, len(context.service_instances))", "def check_vms(st):\n\n logging.info(\"Checking batch system's VMs...\")\n check_time = time.time()\n\n # Retrieve *all* running instances (also the non-owned ones) and filter out\n # statuses of workers which are not valid VMs: we are not interested in them\n rvms = running_instances()\n rvms2 = []\n\n rips = []\n if rvms is not None:\n for inst in rvms:\n ipv4 = inst.network_ip(network_name=cf[\"api\"][\"network_name\"])\n if ipv4 is not None:\n rips.append(ipv4)\n rvms2.append(inst)\n if len(rips) == 0:\n rips = None\n new_workers_status = BatchPlugin.poll_status( st['workers_status'], rips )\n\n rvms=rvms2\n\n if new_workers_status is not None:\n #logging.debug(new_workers_status)\n st['workers_status'] = new_workers_status\n new_workers_status = None\n\n hosts_shutdown = []\n for host,info in st['workers_status'].iteritems():\n if info['jobs'] != 0: continue\n if (check_time-info['unchangedsince']) > cf['elastiq']['idle_for_time_s']:\n logging.info(\"Host %s is idle for more than %ds: requesting shutdown\" % \\\n (host,cf['elastiq']['idle_for_time_s']))\n st['workers_status'][host]['unchangedsince'] = check_time # reset timer\n hosts_shutdown.append(host)\n\n if len(hosts_shutdown) > 0:\n inst_ok = scale_down(hosts_shutdown, valid_hostnames=st['workers_status'].keys())\n change_vms_allegedly_running(st, -len(inst_ok))\n\n # Scale up to reach the minimum quota, if any\n min_vms = cf['quota']['min_vms']\n if min_vms >= 1:\n rvms = running_instances(st['workers_status'].keys())\n if rvms is None:\n logging.warning(\"Cannot get list of running instances for honoring min quota of %d\" % min_vms)\n else:\n n_run = len(rvms)\n n_consider_run = n_run + st['vms_allegedly_running']\n logging.info(\"VMs: running=%d | allegedly running=%d | considering=%d\" % \\\n (n_run, st['vms_allegedly_running'], n_consider_run))\n n_vms = min_vms-n_consider_run\n if n_vms > 0:\n logging.info(\"Below minimum quota (%d VMs): requesting %d more VMs\" % \\\n (min_vms,n_vms))\n inst_ok = scale_up(n_vms, valid_hostnames=st['workers_status'].keys(), vms_allegedly_running=st['vms_allegedly_running'] )\n for inst in inst_ok:\n change_vms_allegedly_running(st, 1, inst)\n st['event_queue'].append({\n 'action': 'check_owned_instance',\n 'when': time.time() + cf['elastiq']['estimated_vm_deploy_time_s'],\n 'params': [ inst ]\n })\n\n # OK: schedule when configured\n sched_when = time.time() + cf['elastiq']['check_vms_every_s']\n\n else:\n # Not OK: reschedule ASAP\n sched_when = 0\n\n return {\n 'action': 'check_vms',\n 'when': sched_when\n }", "def check_yarn_service(master, ec2_opts, num_nodes):\n output = spark_ec2.ssh_read(master, ec2_opts, \"/root/ephemeral-hdfs/bin/yarn node -list -all |grep RUNNING |wc -l\")\n # Ok if one slave is down\n return int(output) >= int(num_nodes) - 1", "def create_instance(name, config, region, secrets, key_name, instance_data,\n deploypass):\n conn = connect_to_region(\n region,\n aws_access_key_id=secrets['aws_access_key_id'],\n aws_secret_access_key=secrets['aws_secret_access_key']\n )\n vpc = VPCConnection(\n region=conn.region,\n aws_access_key_id=secrets['aws_access_key_id'],\n aws_secret_access_key=secrets['aws_secret_access_key'])\n\n # Make sure we don't request the same things twice\n token = str(uuid.uuid4())[:16]\n\n instance_data = instance_data.copy()\n instance_data['name'] = name\n instance_data['hostname'] = '{name}.{domain}'.format(\n name=name, 
domain=config['domain'])\n\n bdm = None\n if 'device_map' in config:\n bdm = BlockDeviceMapping()\n for device, device_info in config['device_map'].items():\n bdm[device] = BlockDeviceType(size=device_info['size'],\n delete_on_termination=True)\n\n ip_address = get_ip(instance_data['hostname'])\n subnet_id = None\n\n if ip_address:\n s_id = get_subnet_id(vpc, ip_address)\n if s_id in config['subnet_ids']:\n if ip_available(conn, ip_address):\n subnet_id = s_id\n else:\n log.warning(\"%s already assigned\" % ip_address)\n\n # TODO: fail if no IP assigned\n if not ip_address or not subnet_id:\n ip_address = None\n subnet_id = choice(config.get('subnet_ids'))\n\n while True:\n try:\n reservation = conn.run_instances(\n image_id=config['ami'],\n key_name=key_name,\n instance_type=config['instance_type'],\n block_device_map=bdm,\n client_token=token,\n subnet_id=subnet_id,\n private_ip_address=ip_address,\n disable_api_termination=bool(config.get('disable_api_termination')),\n security_group_ids=config.get('security_group_ids', []),\n )\n break\n except boto.exception.BotoServerError:\n log.exception(\"Cannot start an instance\")\n time.sleep(10)\n\n instance = reservation.instances[0]\n log.info(\"instance %s created, waiting to come up\", instance)\n # Wait for the instance to come up\n while True:\n try:\n instance.update()\n if instance.state == 'running':\n break\n except Exception:\n log.exception(\"hit error waiting for instance to come up\")\n time.sleep(10)\n\n instance.add_tag('Name', name)\n instance.add_tag('FQDN', instance_data['hostname'])\n instance.add_tag('created', time.strftime(\"%Y-%m-%d %H:%M:%S %Z\",\n time.gmtime()))\n instance.add_tag('moz-type', config['type'])\n\n log.info(\"assimilating %s\", instance)\n instance.add_tag('moz-state', 'pending')\n while True:\n try:\n assimilate(instance.private_ip_address, config, instance_data, deploypass)\n break\n except:\n log.exception(\"problem assimilating %s\", instance)\n time.sleep(10)\n instance.add_tag('moz-state', 'ready')", "def test_update_hyperflex_cluster(self):\n pass", "def update_pvserver_instances(instances):\n status = {True: \"available\", False: \"in-use\"}\n for k, v in instances.items():\n is_available = is_pvserver_available(v[\"name\"], v[\"port\"])\n v[\"status\"] = status[is_available]", "def update_available_resource(self, context):\n # ask hypervisor for its view of resource availability &\n # usage:\n resources = self.driver.get_available_resource()\n if not resources:\n # The virt driver does not support this function\n LOG.warn(_(\"Virt driver does not support \"\n \"'get_available_resource' Compute tracking is disabled.\"))\n self.compute_node = None\n self.claims = {}\n return\n\n # Confirm resources dictionary contains expected keys:\n self._verify_resources(resources)\n\n resources['free_ram_mb'] = resources['memory_mb'] - \\\n resources['memory_mb_used']\n resources['free_disk_gb'] = resources['local_gb'] - \\\n resources['local_gb_used']\n\n LOG.audit(_(\"free_ram_mb: %s\") % resources['free_ram_mb'])\n LOG.audit(_(\"free_disk_gb: %s\") % resources['free_disk_gb'])\n # Apply resource claims representing in-progress operations to\n # 'resources'. 
This may over-estimate the amount of resources in use,\n # at least until the next time 'update_available_resource' runs.\n self._apply_claims(resources)\n\n # also generate all load stats:\n values = self._create_load_stats(context)\n resources.update(values)\n\n if not self.compute_node:\n # we need a copy of the ComputeNode record:\n service = self._get_service(context)\n if not service:\n # no service record, disable resource\n return\n\n compute_node_ref = service['compute_node']\n if compute_node_ref:\n self.compute_node = compute_node_ref[0]\n\n if not self.compute_node:\n # Need to create the ComputeNode record:\n resources['service_id'] = service['id']\n self.compute_node = self._create(context, resources)\n LOG.info(_('Compute_service record created for %s ') % self.host)\n\n else:\n # just update the record:\n self.compute_node = self._update(context, resources,\n prune_stats=True)\n LOG.info(_('Compute_service record updated for %s ') % self.host)", "def instance_outdated_age(instance_id, days_fresh):\n\n response = ec2_client.describe_instances(\n InstanceIds=[\n instance_id,\n ]\n )\n\n instance_launch_time = response['Reservations'][0]['Instances'][0]['LaunchTime']\n\n # gets the age of a node by days only:\n instance_age = ((datetime.datetime.now(instance_launch_time.tzinfo) - instance_launch_time).days)\n\n # gets the remaining age of a node in seconds (e.g. if node is y days and x seconds old this will only retrieve the x seconds):\n instance_age_remainder = ((datetime.datetime.now(instance_launch_time.tzinfo) - instance_launch_time).seconds)\n\n if instance_age > days_fresh:\n logger.info(\"Instance id {} launch age of '{}' day(s) is older than expected '{}' day(s)\".format(instance_id, instance_age, days_fresh))\n return True\n elif (instance_age == days_fresh) and (instance_age_remainder > 0):\n logger.info(\"Instance id {} is older than expected '{}' day(s) by {} seconds.\".format(instance_id, days_fresh, instance_age_remainder))\n return True\n else:\n logger.info(\"Instance id {} : OK \".format(instance_id))\n return False", "def check_exactly_one_current_version(self):\n expected_state = \"CURRENT\"\n\n query = \"SELECT COUNT(*) FROM cluster_version;\"\n self.cursor.execute(query)\n result = self.cursor.fetchone()\n if result is None or len(result) != 1:\n Logger.error(\"Unable to run query: {0}\".format(query))\n return\n\n count = result[0]\n if count == 0:\n msg = \"There are no cluster_versions. 
Start ambari-server, and then perform a Restart on one of the services.\\n\" + \\\n \"Then navigate to the \\\"Stacks and Versions > Versions\\\" page and ensure you can see the stack version.\\n\" + \\\n \"Next, restart all services, one-by-one, so that Ambari knows what version each component is running.\"\n Logger.warning(msg)\n elif count == 1:\n query = \"SELECT rv.repo_version_id, rv.version, cv.state FROM cluster_version cv JOIN repo_version rv ON cv.repo_version_id = rv.repo_version_id;\"\n self.cursor.execute(query)\n result = self.cursor.fetchone()\n\n repo_version_id = None\n repo_version = None\n cluster_version_state = None\n\n if result and len(result) == 3:\n repo_version_id = result[0]\n repo_version = result[1]\n cluster_version_state = result[2]\n\n if repo_version_id and repo_version and cluster_version_state:\n if cluster_version_state.upper() == expected_state:\n self.check_all_hosts(repo_version_id, repo_version)\n Logger.info(\"******************************************************************************************************************************************************\")\n Logger.info(\"\\t\\t\\t\\t\\t\\t\\tHDP STACK OVERVIEW\")\n\t Logger.info(\"******************************************************************************************************************************************************\")\n print (\"\\n\")\n Logger.info(\"Cluster HDP Version\\t{0}\".format(repo_version))\n Logger.info(\"Cluster State\\t{0}\".format(cluster_version_state))\n Logger.info(\"Ambari version\\t:{0}\".format(self.ambari_version))\n\n if self.ambari_server_user != \"root\" :\n Logger.info(\"Ambari Server as non-root?\\tYes\")\n else :\n Logger.info(\"Ambari Server as non-root?\\tNo\")\n\n # Read ambari-agent.ini file\n if os.path.exists(AMBARI_AGENT_INI):\n self.ambari_agent_props = self.read_conf_file(AMBARI_AGENT_INI)\n Logger.debug(\"Reading file {0}.\".format(self.ambari_agent_props))\n if \"run_as_user\" in self.ambari_agent_props:\n self.run_as_user = self.ambari_agent_props[\"run_as_user\"]\n if self.run_as_user != \"root\":\n Logger.info(\"Ambari Agent as non-root?\\tYes\")\n else:\n Logger.info(\"Ambari Agent as non-root?\\tNo\")\n else:\n Logger.error(\"Unable to read ambari-agent.ini file\")\n\n else:\n Logger.error(\"Cluster Version {0} should have a state of {1} but is {2}. Make sure to restart all of the Services.\".format(repo_version, expected_state, cluster_version_state))\n else:\n Logger.error(\"Unable to run query: {0}\".format(query))\n elif count > 1:\n # Ensure at least one Cluster Version is CURRENT\n Logger.info(\"Found multiple Cluster versions, checking that exactly one is {0}.\".format(expected_state))\n query = \"SELECT rv.repo_version_id, rv.version, cv.state FROM cluster_version cv JOIN repo_version rv ON cv.repo_version_id = rv.repo_version_id WHERE cv.state = '{0}';\".format(expected_state)\n self.cursor.execute(query)\n rows = self.cursor.fetchall()\n if rows:\n if len(rows) == 1:\n Logger.info(\"Good news; Cluster Version {0} has a state of {1}.\".format(rows[0][1], expected_state))\n self.check_all_hosts_current(rows[0][0], rows[0][1])\n elif len(rows) > 1:\n # Take the repo_version's version column\n repo_versions = [row[1] for row in rows if len(row) == 3]\n Logger.error(\"Found multiple cluster versions with a state of {0}, but only one should be {0}.\\n\" \\\n \"Will need to fix this manually, please contact Support. 
Cluster Versions found: {1}\".format(expected_state, \", \".join(repo_versions)))\n else:\n Logger.error(\"Unable to run query: {0}\\n\".format(query))\n pass", "def provision(args):\n cfg_file = os.path.join(xbow.XBOW_CONFIGDIR, \"settings.yml\")\n\n with open(cfg_file, 'r') as ymlfile:\n cfg = yaml.safe_load(ymlfile)\n\n scheduler = get_by_name(cfg['scheduler_name'])\n if len(scheduler) == 0:\n raise ValueError('Error - cannot find the scheduler')\n elif len(scheduler) > 1:\n raise ValueError('Error - more than one scheduler found')\n workers = get_by_name(cfg['worker_pool_name'])\n if len(workers) == 0:\n print('Warning: no workers found')\n all_nodes = scheduler + workers\n all_cis = [ConnectedInstance(i) for i in all_nodes]\n with open(args.script, 'r') as f:\n for line in f:\n if len(line) > 0 and line[0] == '#':\n print(line[:-1])\n elif len(line) > 0 :\n command = line[:-1]\n if command.split()[0] != 'sudo':\n command = 'sudo ' + command\n print(command + ' : ', end='', flush=True)\n result = exec_all(all_cis, command)\n status = np.all(np.array(result) == 0)\n if status:\n print('OK')\n else:\n print('FAILED')\n for i in range(len(result)):\n if result[i] != 0:\n if i == 0:\n print('Error on scheduler:')\n else:\n print('Error on worker {}'.format(i-1))\n print(all_cis[i].output)\n break\n else:\n status = False\n print(line[:-1], ' : ERROR')\n break\n\n return status", "def test_rebuilt_server_vcpus(self):\n\n remote_client = self.server_behaviors.get_remote_instance_client(\n self.server, self.servers_config)\n server_actual_vcpus = remote_client.get_number_of_cpus()\n self.assertEqual(server_actual_vcpus, self.expected_vcpus)", "def check_fixed(self: AutoScaler) -> AutoScalerState:\n launched_size = len(self.clients)\n registered_size = Client.count_connected()\n task_count = Task.count_remaining()\n log.debug(f'Autoscale check (clients: {registered_size}/{launched_size}, tasks: {task_count})')\n if launched_size < self.min_size:\n log.debug(f'Autoscale min-size reached ({launched_size} < {self.min_size})')\n return AutoScalerState.SCALE\n if launched_size == 0 and task_count == 0:\n return AutoScalerState.WAIT\n if launched_size == 0 and task_count > 0:\n log.debug(f'Autoscale adding client ({task_count} tasks remaining)')\n return AutoScalerState.SCALE\n else:\n return AutoScalerState.WAIT", "def checkDBImportInstance(self, instance):\n\n\t\tsession = self.configDBSession()\n\t\tdbimportInstances = aliased(configSchema.dbimportInstances)\n\n\t\tresult = (session.query(\n\t\t\t\tdbimportInstances.name\n\t\t\t)\n\t\t\t.select_from(dbimportInstances)\n\t\t\t.filter(dbimportInstances.name == instance)\n\t\t\t.count())\n\n\t\tif result == 0:\n\t\t\tlogging.error(\"No DBImport Instance with that name can be found in table 'dbimport_instances'\")\n\t\t\tself.remove_temporary_files()\n\t\t\tsys.exit(1)", "def run_instances(self, image_id, min_count=1, max_count=1,\r\n key_name=None, security_groups=None,\r\n user_data=None, addressing_type=None,\r\n instance_type='m1.small', placement=None,\r\n kernel_id=None, ramdisk_id=None,\r\n monitoring_enabled=False, subnet_id=None,\r\n block_device_map=None,\r\n disable_api_termination=False,\r\n instance_initiated_shutdown_behavior=None,\r\n private_ip_address=None,\r\n placement_group=None, client_token=None,\r\n security_group_ids=None):\r\n params = {'ImageId':image_id,\r\n 'MinCount':min_count,\r\n 'MaxCount': max_count}\r\n if key_name:\r\n params['KeyName'] = key_name\r\n if security_group_ids:\r\n l = []\r\n for group in 
security_group_ids:\r\n if isinstance(group, SecurityGroup):\r\n l.append(group.name)\r\n else:\r\n l.append(group)\r\n self.build_list_params(params, l, 'SecurityGroupId')\r\n if security_groups:\r\n l = []\r\n for group in security_groups:\r\n if isinstance(group, SecurityGroup):\r\n l.append(group.name)\r\n else:\r\n l.append(group)\r\n self.build_list_params(params, l, 'SecurityGroup')\r\n if user_data:\r\n params['UserData'] = base64.b64encode(user_data)\r\n if addressing_type:\r\n params['AddressingType'] = addressing_type\r\n if instance_type:\r\n params['InstanceType'] = instance_type\r\n if placement:\r\n params['Placement.AvailabilityZone'] = placement\r\n if placement_group:\r\n params['Placement.GroupName'] = placement_group\r\n if kernel_id:\r\n params['KernelId'] = kernel_id\r\n if ramdisk_id:\r\n params['RamdiskId'] = ramdisk_id\r\n if monitoring_enabled:\r\n params['Monitoring.Enabled'] = 'true'\r\n if subnet_id:\r\n params['SubnetId'] = subnet_id\r\n if private_ip_address:\r\n params['PrivateIpAddress'] = private_ip_address\r\n if block_device_map:\r\n block_device_map.build_list_params(params)\r\n if disable_api_termination:\r\n params['DisableApiTermination'] = 'true'\r\n if instance_initiated_shutdown_behavior:\r\n val = instance_initiated_shutdown_behavior\r\n params['InstanceInitiatedShutdownBehavior'] = val\r\n if client_token:\r\n params['ClientToken'] = client_token\r\n return self.get_object('RunInstances', params, Reservation, verb='POST')", "def __init__(__self__, *,\n backend_type: Optional[pulumi.Input['InstanceBackendType']] = None,\n connection_name: Optional[pulumi.Input[str]] = None,\n current_disk_size: Optional[pulumi.Input[str]] = None,\n database_version: Optional[pulumi.Input['InstanceDatabaseVersion']] = None,\n disk_encryption_configuration: Optional[pulumi.Input['DiskEncryptionConfigurationArgs']] = None,\n disk_encryption_status: Optional[pulumi.Input['DiskEncryptionStatusArgs']] = None,\n etag: Optional[pulumi.Input[str]] = None,\n failover_replica: Optional[pulumi.Input['InstanceFailoverReplicaArgs']] = None,\n gce_zone: Optional[pulumi.Input[str]] = None,\n instance_type: Optional[pulumi.Input['InstanceInstanceType']] = None,\n ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input['IpMappingArgs']]]] = None,\n ipv6_address: Optional[pulumi.Input[str]] = None,\n kind: Optional[pulumi.Input[str]] = None,\n maintenance_version: Optional[pulumi.Input[str]] = None,\n master_instance_name: Optional[pulumi.Input[str]] = None,\n max_disk_size: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n on_premises_configuration: Optional[pulumi.Input['OnPremisesConfigurationArgs']] = None,\n out_of_disk_report: Optional[pulumi.Input['SqlOutOfDiskReportArgs']] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n replica_configuration: Optional[pulumi.Input['ReplicaConfigurationArgs']] = None,\n replica_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n root_password: Optional[pulumi.Input[str]] = None,\n satisfies_pzs: Optional[pulumi.Input[bool]] = None,\n scheduled_maintenance: Optional[pulumi.Input['SqlScheduledMaintenanceArgs']] = None,\n secondary_gce_zone: Optional[pulumi.Input[str]] = None,\n self_link: Optional[pulumi.Input[str]] = None,\n server_ca_cert: Optional[pulumi.Input['SslCertArgs']] = None,\n service_account_email_address: Optional[pulumi.Input[str]] = None,\n settings: Optional[pulumi.Input['SettingsArgs']] = None,\n state: 
Optional[pulumi.Input['InstanceState']] = None,\n suspension_reason: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceSuspensionReasonItem']]]] = None):\n if backend_type is not None:\n pulumi.set(__self__, \"backend_type\", backend_type)\n if connection_name is not None:\n pulumi.set(__self__, \"connection_name\", connection_name)\n if current_disk_size is not None:\n pulumi.set(__self__, \"current_disk_size\", current_disk_size)\n if database_version is not None:\n pulumi.set(__self__, \"database_version\", database_version)\n if disk_encryption_configuration is not None:\n pulumi.set(__self__, \"disk_encryption_configuration\", disk_encryption_configuration)\n if disk_encryption_status is not None:\n pulumi.set(__self__, \"disk_encryption_status\", disk_encryption_status)\n if etag is not None:\n warnings.warn(\"\"\"This field is deprecated and will be removed from a future version of the API. Use the `settings.settingsVersion` field instead.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"etag is deprecated: This field is deprecated and will be removed from a future version of the API. Use the `settings.settingsVersion` field instead.\"\"\")\n if etag is not None:\n pulumi.set(__self__, \"etag\", etag)\n if failover_replica is not None:\n pulumi.set(__self__, \"failover_replica\", failover_replica)\n if gce_zone is not None:\n pulumi.set(__self__, \"gce_zone\", gce_zone)\n if instance_type is not None:\n pulumi.set(__self__, \"instance_type\", instance_type)\n if ip_addresses is not None:\n pulumi.set(__self__, \"ip_addresses\", ip_addresses)\n if ipv6_address is not None:\n warnings.warn(\"\"\"The IPv6 address assigned to the instance. (Deprecated) This property was applicable only to First Generation instances.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"ipv6_address is deprecated: The IPv6 address assigned to the instance. 
(Deprecated) This property was applicable only to First Generation instances.\"\"\")\n if ipv6_address is not None:\n pulumi.set(__self__, \"ipv6_address\", ipv6_address)\n if kind is not None:\n pulumi.set(__self__, \"kind\", kind)\n if maintenance_version is not None:\n pulumi.set(__self__, \"maintenance_version\", maintenance_version)\n if master_instance_name is not None:\n pulumi.set(__self__, \"master_instance_name\", master_instance_name)\n if max_disk_size is not None:\n pulumi.set(__self__, \"max_disk_size\", max_disk_size)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if on_premises_configuration is not None:\n pulumi.set(__self__, \"on_premises_configuration\", on_premises_configuration)\n if out_of_disk_report is not None:\n pulumi.set(__self__, \"out_of_disk_report\", out_of_disk_report)\n if project is not None:\n pulumi.set(__self__, \"project\", project)\n if region is not None:\n pulumi.set(__self__, \"region\", region)\n if replica_configuration is not None:\n pulumi.set(__self__, \"replica_configuration\", replica_configuration)\n if replica_names is not None:\n pulumi.set(__self__, \"replica_names\", replica_names)\n if root_password is not None:\n pulumi.set(__self__, \"root_password\", root_password)\n if satisfies_pzs is not None:\n pulumi.set(__self__, \"satisfies_pzs\", satisfies_pzs)\n if scheduled_maintenance is not None:\n pulumi.set(__self__, \"scheduled_maintenance\", scheduled_maintenance)\n if secondary_gce_zone is not None:\n pulumi.set(__self__, \"secondary_gce_zone\", secondary_gce_zone)\n if self_link is not None:\n pulumi.set(__self__, \"self_link\", self_link)\n if server_ca_cert is not None:\n pulumi.set(__self__, \"server_ca_cert\", server_ca_cert)\n if service_account_email_address is not None:\n pulumi.set(__self__, \"service_account_email_address\", service_account_email_address)\n if settings is not None:\n pulumi.set(__self__, \"settings\", settings)\n if state is not None:\n pulumi.set(__self__, \"state\", state)\n if suspension_reason is not None:\n pulumi.set(__self__, \"suspension_reason\", suspension_reason)", "def do_create(self):\n cluster_id = self.entity.cluster_id\n if cluster_id and self.cause == consts.CAUSE_RPC:\n # Check cluster size constraint if target cluster is specified\n cluster = cm.Cluster.load(self.context, cluster_id)\n desired = no.Node.count_by_cluster(self.context, cluster_id)\n result = su.check_size_params(cluster, desired, None, None, True)\n if result:\n # cannot place node into the cluster\n no.Node.update(self.context, self.entity.id,\n {'cluster_id': '', 'status': consts.NS_ERROR})\n return self.RES_ERROR, result\n\n res, reason = self.entity.do_create(self.context)\n\n if cluster_id and self.cause == consts.CAUSE_RPC:\n # Update cluster's desired_capacity and re-evaluate its status no\n # matter the creation is a success or not because the node object\n # is already treated as member of the cluster and the node\n # creation may have changed the cluster's status\n cluster.eval_status(self.context, consts.NODE_CREATE,\n desired_capacity=desired)\n if res:\n return self.RES_OK, 'Node created successfully.'\n else:\n return self.RES_ERROR, reason", "def upgrade():\n op.add_column(\n 'share_instances',\n Column('access_rules_status', String(length=255))\n )\n\n connection = op.get_bind()\n share_instances_table = utils.load_table('share_instances', connection)\n instance_access_table = utils.load_table('share_instance_access_map',\n connection)\n\n # NOTE(u_glide): Data migrations shouldn't be 
performed on live clouds\n # because it will lead to unpredictable behaviour of running operations\n # like migration.\n instances_query = (\n share_instances_table.select()\n .where(share_instances_table.c.status == constants.STATUS_AVAILABLE)\n .where(share_instances_table.c.deleted == 'False')\n )\n\n for instance in connection.execute(instances_query):\n\n access_mappings_query = instance_access_table.select().where(\n instance_access_table.c.share_instance_id == instance['id']\n ).where(instance_access_table.c.deleted == 'False')\n\n status = constants.STATUS_ACTIVE\n\n for access_rule in connection.execute(access_mappings_query):\n\n if (access_rule['state'] == constants.STATUS_DELETING or\n access_rule['state'] not in priorities):\n continue\n\n if priorities[access_rule['state']] > priorities[status]:\n status = access_rule['state']\n\n # pylint: disable=no-value-for-parameter\n op.execute(\n share_instances_table.update().where(\n share_instances_table.c.id == instance['id']\n ).values({'access_rules_status': upgrade_data_mapping[status]})\n )\n\n op.drop_column('share_instance_access_map', 'state')", "def test_live_migration_src_check_volume_node_not_alive(self):\n\n instance_id = self._create_instance()\n i_ref = db.instance_get(self.context, instance_id)\n dic = {'instance_id': instance_id, 'size': 1}\n v_ref = db.volume_create(self.context, {'instance_id': instance_id,\n 'size': 1})\n t1 = utils.utcnow() - datetime.timedelta(1)\n dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume',\n 'topic': 'volume', 'report_count': 0}\n s_ref = db.service_create(self.context, dic)\n\n self.assertRaises(exception.VolumeServiceUnavailable,\n self.scheduler.driver.schedule_live_migration,\n self.context, instance_id, i_ref['host'])\n\n db.instance_destroy(self.context, instance_id)\n db.service_destroy(self.context, s_ref['id'])\n db.volume_destroy(self.context, v_ref['id'])", "def check_runtime_vm(**kwargs):\n\n ti: TaskInstance = kwargs[\"ti\"]\n last_warning_time = ti.xcom_pull(\n key=TerraformTasks.XCOM_WARNING_TIME,\n task_ids=TerraformTasks.TASK_ID_VM_RUNTIME,\n dag_id=TerraformTasks.DAG_ID_DESTROY_VM,\n include_prior_dates=True,\n )\n start_time_vm = ti.xcom_pull(\n key=TerraformTasks.XCOM_START_TIME_VM,\n task_ids=TerraformTasks.TASK_ID_RUN,\n dag_id=TerraformTasks.DAG_ID_CREATE_VM,\n include_prior_dates=True,\n )\n\n if start_time_vm:\n # calculate number of hours passed since start time vm and now\n hours_on = (ti.start_date - start_time_vm).total_seconds() / 3600\n logging.info(\n f\"Start time VM: {start_time_vm}, hours passed since start time: {hours_on}, warning limit: \"\n f\"{TerraformTasks.VM_RUNTIME_H_WARNING}\"\n )\n\n # check if a warning has been sent previously and if so, how many hours ago\n if last_warning_time:\n hours_since_warning = (ti.start_date - last_warning_time).total_seconds() / 3600\n else:\n hours_since_warning = None\n\n # check if the VM has been on longer than the limit\n if hours_on > TerraformTasks.VM_RUNTIME_H_WARNING:\n # check if no warning was sent before or last time was longer ago than warning frequency\n if not hours_since_warning or hours_since_warning > TerraformTasks.WARNING_FREQUENCY_H:\n comments = (\n f\"Worker VM has been on since {start_time_vm}. No. 
hours passed since then: \"\n f\"{hours_on}.\"\n f\" Warning limit: {TerraformTasks.VM_RUNTIME_H_WARNING}H\"\n )\n project_id = Variable.get(AirflowVars.PROJECT_ID)\n slack_hook = create_slack_webhook(comments, project_id, **kwargs)\n\n # http_hook outputs the secret token, suppressing logging 'info' by setting level to 'warning'\n old_levels = change_task_log_level(logging.WARNING)\n slack_hook.execute()\n # change back to previous levels\n change_task_log_level(old_levels)\n\n ti.xcom_push(TerraformTasks.XCOM_WARNING_TIME, ti.start_date)\n else:\n logging.info(f\"Start time VM unknown.\")", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n auto_healing_policies: Optional[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerAutoHealingPoliciesArgs']]] = None,\n base_instance_name: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n distribution_policy_zones: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n named_ports: Optional[pulumi.Input[List[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerNamedPortArgs']]]]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n stateful_disks: Optional[pulumi.Input[List[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerStatefulDiskArgs']]]]] = None,\n target_pools: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,\n target_size: Optional[pulumi.Input[float]] = None,\n update_policy: Optional[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerUpdatePolicyArgs']]] = None,\n versions: Optional[pulumi.Input[List[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerVersionArgs']]]]] = None,\n wait_for_instances: Optional[pulumi.Input[bool]] = None,\n __props__=None,\n __name__=None,\n __opts__=None):\n if __name__ is not None:\n warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning)\n resource_name = __name__\n if __opts__ is not None:\n warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning)\n opts = __opts__\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n if opts.version is None:\n opts.version = _utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = dict()\n\n __props__['auto_healing_policies'] = auto_healing_policies\n if base_instance_name is None:\n raise TypeError(\"Missing required property 'base_instance_name'\")\n __props__['base_instance_name'] = base_instance_name\n __props__['description'] = description\n __props__['distribution_policy_zones'] = distribution_policy_zones\n __props__['name'] = name\n __props__['named_ports'] = named_ports\n __props__['project'] = project\n if region is None:\n raise TypeError(\"Missing required property 'region'\")\n __props__['region'] = region\n __props__['stateful_disks'] = stateful_disks\n __props__['target_pools'] = target_pools\n __props__['target_size'] = target_size\n __props__['update_policy'] = update_policy\n if versions is None:\n raise TypeError(\"Missing required property 'versions'\")\n __props__['versions'] = versions\n __props__['wait_for_instances'] = wait_for_instances\n __props__['fingerprint'] = None\n 
__props__['instance_group'] = None\n __props__['self_link'] = None\n super(RegionInstanceGroupManager, __self__).__init__(\n 'gcp:compute/regionInstanceGroupManager:RegionInstanceGroupManager',\n resource_name,\n __props__,\n opts)", "def get_running_instance_count(body):\n\tbody = dict(body)\n\tconn = autoscale.connect_to_region(region_name='us-west-2',\n\t\t\t\t\t\t\t\taws_access_key_id=body[\"aws_access_key_id\"],\n\t\t\t\t\t\t\t\taws_secret_access_key=body[\"aws_secret_access_key\"])\n\tinstance_status = conn.get_all_autoscaling_instances()\n\trunning_instances = 0\n\tfor item in instance_status:\n\t\tif (item.health_status == 'HEALTHY'):\n\t\t\trunning_instances += 1\n\treturn {\"data\":running_instances}", "def check_stability(self):", "def test_too_many_cores(self):\n compute1 = self.start_service('compute', host='host1')\n compute2 = self.start_service('compute', host='host2')\n instance_ids1 = []\n instance_ids2 = []\n for index in xrange(FLAGS.max_cores):\n instance_id = self._create_instance()\n compute1.run_instance(self.context, instance_id)\n instance_ids1.append(instance_id)\n instance_id = self._create_instance()\n compute2.run_instance(self.context, instance_id)\n instance_ids2.append(instance_id)\n instance_id = self._create_instance()\n self.assertRaises(driver.NoValidHost,\n self.scheduler.driver.schedule_run_instance,\n self.context,\n instance_id)\n db.instance_destroy(self.context, instance_id)\n for instance_id in instance_ids1:\n compute1.terminate_instance(self.context, instance_id)\n for instance_id in instance_ids2:\n compute2.terminate_instance(self.context, instance_id)\n compute1.kill()\n compute2.kill()", "def scale_up_application(asg_name):\n if_verbose(\"Scaling up %s in steps of %d\" % (asg_name, args.instance_count_step))\n current_capacity_count = args.instance_count_step\n while(True):\n check_error(scale_up_autoscaling_group(asg_name, current_capacity_count))\n check_error(check_autoscaling_group_health(asg_name, current_capacity_count))\n\n if args.elb_name:\n asg_instances = [{\"InstanceId\": a[\"InstanceId\"]} for a in asg.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name], MaxRecords=1)[\"AutoScalingGroups\"][0][\"Instances\"]]\n check_error(check_elb_instance_health(args.elb_name, asg_instances))\n\n if args.instance_count == current_capacity_count:\n break\n else:\n current_capacity_count += args.instance_count_step\n else:\n break\n\n if_verbose(\"Scaling up %s successful\" % asg_name)", "def test_delete_cluster_resource_quota(self):\n pass", "def test_assign_configuration_to_valid_instance(self):\n print(\"instance_info.id: %s\" % instance_info.id)\n print(\"configuration_info: %s\" % configuration_info)\n print(\"configuration_info.id: %s\" % configuration_info.id)\n config_id = configuration_info.id\n instance_info.dbaas.instances.modify(instance_info.id,\n configuration=config_id)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)", "def main():\n rs = redshift(config_file=CONFIG_FILENAME)\n \n # check if cluster already available\n try:\n clust_avail = check_available(rs)\n except rs_client.exceptions.ClusterNotFoundFault:\n clust_avail = False\n\n # if cluster not available, create it\n if not clust_avail:\n create_cluster(rs) \n \n print(f'Cluster is available. 
Cluster information: \\n{rs.get_cluster_info()}')", "def run(self):\n client = self._get_client()\n metadata = self._metadata\n\n if str(metadata.get('cluster')) == str(self._cluster_id):\n if str(self._uuid) == str(self._node_id):\n #hypervisor_hostname = client.servers.find(\n # id=self._uuid).to_dict()['OS-EXT-SRV-ATTR:hypervisor_hostname']\n hypervisor_hostname = client.servers.find(\n id=self._uuid).to_dict()['OS-EXT-SRV-ATTR:host']\n data = {\n 'migrate': True,\n 'uuid': self._uuid,\n 'hypervisor_hostname': hypervisor_hostname,\n 'flavor_id': self._flavor\n }\n return Result(data)\n\n data = {\n 'migrate': False,\n 'uuid': self._uuid,\n 'hypervisor_hostname': '',\n 'flavor_id':self._flavor\n }\n return Result(data)", "def launch_instance_vpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n subnet_id,\n security_group_id,\n machine_type = 'm3.medium',\n user_data = None,\n wait_for_running = True,\n public_ip = False,\n static_ip_address = None,\n monitor_params = None ) :\n interfaces = None\n subnet = None\n security_group_ids = None\n \n if static_ip_address is None:\n spec = boto.ec2.networkinterface.NetworkInterfaceSpecification( subnet_id = subnet_id,\n groups = [ security_group_id ],\n associate_public_ip_address = public_ip )\n interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection( spec )\n else:\n subnet = subnet_id\n security_group_ids = [security_group_id]\n\n instance_r = ec2_conn.run_instances( image_id = ami.id,\n key_name = keypair,\n instance_type = machine_type,\n monitoring_enabled = True,\n network_interfaces = interfaces,\n subnet_id = subnet, \n user_data = user_data,\n security_group_ids = security_group_ids,\n private_ip_address = static_ip_address )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n \n print \"Waiting for instance to be ready\"\n \n if wait_for_running :\n running = wait_on_object_state( instance, 'running', max_wait = 600, failure_state = 'terminated' )\n if not running :\n print \"Deployment instance still not up after long period of time! Exiting...\"\n sys.exit( 3 )\n\n if monitor_params :\n print \"Adding monitoring to the instance.\"\n\n return instance", "def update(self, validate=False):\r\n rs = self.connection.get_all_dbinstances(self.id)\r\n if len(rs) > 0:\r\n for i in rs:\r\n if i.id == self.id:\r\n self.__dict__.update(i.__dict__)\r\n elif validate:\r\n raise ValueError('%s is not a valid Instance ID' % self.id)\r\n return self.status", "def db_healthcheck() -> bool:\n\n try:\n result = query_db(\"Select 1\")\n app.logfile.info(\"Select 1\")\n return True\n except ConnectionError as err:\n app.logger.error(err)\n return False" ]
[ "0.61977696", "0.59943205", "0.5993166", "0.5947614", "0.59099776", "0.5793798", "0.5754149", "0.57099897", "0.56931806", "0.5669819", "0.5640985", "0.5618391", "0.55936277", "0.5537415", "0.5518622", "0.54929787", "0.5492541", "0.54782796", "0.5477664", "0.54595304", "0.54503506", "0.5448645", "0.5402863", "0.5379081", "0.5374825", "0.5368817", "0.5363126", "0.5359722", "0.53289676", "0.53256845", "0.5297527", "0.52857167", "0.5284727", "0.527614", "0.527284", "0.5258998", "0.52584803", "0.52582496", "0.5245029", "0.5219436", "0.5204467", "0.52035713", "0.52010155", "0.5199581", "0.51962185", "0.5189876", "0.5187886", "0.51823515", "0.51798314", "0.5174562", "0.5171759", "0.516838", "0.5165408", "0.5149708", "0.5148164", "0.5147531", "0.51457584", "0.5131391", "0.51281106", "0.512661", "0.5123801", "0.512119", "0.5103619", "0.5098898", "0.50973976", "0.50894976", "0.5088131", "0.5071653", "0.50690156", "0.50673836", "0.5061204", "0.50485796", "0.5046665", "0.5044445", "0.504355", "0.503482", "0.50327575", "0.50319046", "0.50198346", "0.5018546", "0.5018259", "0.50179106", "0.5016462", "0.5011347", "0.49951693", "0.49837384", "0.49679372", "0.4964339", "0.4963195", "0.49599385", "0.49577937", "0.49474788", "0.49328753", "0.49312326", "0.4926949", "0.4923914", "0.49226472", "0.49210218", "0.49134505", "0.49060914", "0.4904296" ]
0.0
-1
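A minimal sketch, assuming each record of this dump is available as a plain Python dict keyed by the columns shown here (query, document, negatives, negative_scores, document_score, document_rank): it illustrates how one record could be unpacked into (query, positive, negative) triplets, in the spirit of the triplet objective named in the metadata field of the next record. Every helper name and the filtering threshold below are illustrative assumptions, not part of the dataset.

```python
# Illustrative only: unpack one record of this dump into training triplets.
# `record` is assumed to be a plain dict keyed by the columns shown above;
# none of these helper names come from the dataset itself.
from typing import Dict, List, Tuple


def record_to_triplets(record: Dict) -> List[Tuple[str, str, str]]:
    query = record["query"]          # natural-language description of the API
    positive = record["document"]    # the matching code snippet
    negatives = record["negatives"]  # non-matching code snippets
    scores = record.get("negative_scores", [])  # appear aligned, one per negative

    triplets = []
    for i, negative in enumerate(negatives):
        # Scores are stored as strings (e.g. "0.61977696"); convert when filtering.
        score = float(scores[i]) if i < len(scores) else None
        if score is not None and score >= 0.9:
            continue  # example threshold only: skip negatives that score like near-duplicates
        triplets.append((query, positive, negative))
    return triplets
```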
This operation is applicable to replica set instances and sharded cluster instances. You can call this operation to check whether resources are sufficient for creating an instance, upgrading a replica set or sharded cluster instance, or upgrading a single node of the sharded cluster instance. > You can call this operation a maximum of 200 times per minute.
async def evaluate_resource_with_options_async(
        self,
        request: dds_20151201_models.EvaluateResourceRequest,
        runtime: util_models.RuntimeOptions,
    ) -> dds_20151201_models.EvaluateResourceResponse:
        UtilClient.validate_model(request)
        query = {}
        if not UtilClient.is_unset(request.dbinstance_class):
            query['DBInstanceClass'] = request.dbinstance_class
        if not UtilClient.is_unset(request.dbinstance_id):
            query['DBInstanceId'] = request.dbinstance_id
        if not UtilClient.is_unset(request.engine):
            query['Engine'] = request.engine
        if not UtilClient.is_unset(request.engine_version):
            query['EngineVersion'] = request.engine_version
        if not UtilClient.is_unset(request.owner_account):
            query['OwnerAccount'] = request.owner_account
        if not UtilClient.is_unset(request.owner_id):
            query['OwnerId'] = request.owner_id
        if not UtilClient.is_unset(request.readonly_replicas):
            query['ReadonlyReplicas'] = request.readonly_replicas
        if not UtilClient.is_unset(request.region_id):
            query['RegionId'] = request.region_id
        if not UtilClient.is_unset(request.replication_factor):
            query['ReplicationFactor'] = request.replication_factor
        if not UtilClient.is_unset(request.resource_owner_account):
            query['ResourceOwnerAccount'] = request.resource_owner_account
        if not UtilClient.is_unset(request.resource_owner_id):
            query['ResourceOwnerId'] = request.resource_owner_id
        if not UtilClient.is_unset(request.security_token):
            query['SecurityToken'] = request.security_token
        if not UtilClient.is_unset(request.shards_info):
            query['ShardsInfo'] = request.shards_info
        if not UtilClient.is_unset(request.storage):
            query['Storage'] = request.storage
        if not UtilClient.is_unset(request.zone_id):
            query['ZoneId'] = request.zone_id
        req = open_api_models.OpenApiRequest(
            query=OpenApiUtilClient.query(query)
        )
        params = open_api_models.Params(
            action='EvaluateResource',
            version='2015-12-01',
            protocol='HTTPS',
            pathname='/',
            method='POST',
            auth_type='AK',
            style='RPC',
            req_body_type='formData',
            body_type='json'
        )
        return TeaCore.from_map(
            dds_20151201_models.EvaluateResourceResponse(),
            await self.call_api_async(params, req, runtime)
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_smartstack_replication_for_instance(\n instance_config,\n expected_count,\n smartstack_replication_checker,\n):\n\n crit_threshold = instance_config.get_replication_crit_percentage()\n\n log.info('Checking instance %s in smartstack', instance_config.job_id)\n smartstack_replication_info = \\\n smartstack_replication_checker.get_replication_for_instance(instance_config)\n\n log.debug('Got smartstack replication info for %s: %s' %\n (instance_config.job_id, smartstack_replication_info))\n\n if len(smartstack_replication_info) == 0:\n status = pysensu_yelp.Status.CRITICAL\n output = (\n 'Service %s has no Smartstack replication info. Make sure the discover key in your smartstack.yaml '\n 'is valid!\\n'\n ) % instance_config.job_id\n log.error(output)\n else:\n expected_count_per_location = int(expected_count / len(smartstack_replication_info))\n output = ''\n output_critical = ''\n output_ok = ''\n under_replication_per_location = []\n\n for location, available_backends in sorted(smartstack_replication_info.items()):\n num_available_in_location = available_backends.get(instance_config.job_id, 0)\n under_replicated, ratio = is_under_replicated(\n num_available_in_location, expected_count_per_location, crit_threshold,\n )\n if under_replicated:\n output_critical += '- Service %s has %d out of %d expected instances in %s (CRITICAL: %d%%)\\n' % (\n instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,\n )\n else:\n output_ok += '- Service %s has %d out of %d expected instances in %s (OK: %d%%)\\n' % (\n instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,\n )\n under_replication_per_location.append(under_replicated)\n\n output += output_critical\n if output_critical and output_ok:\n output += '\\n\\n'\n output += 'The following locations are OK:\\n'\n output += output_ok\n\n if any(under_replication_per_location):\n status = pysensu_yelp.Status.CRITICAL\n output += (\n \"\\n\\n\"\n \"What this alert means:\\n\"\n \"\\n\"\n \" This replication alert means that a SmartStack powered loadbalancer (haproxy)\\n\"\n \" doesn't have enough healthy backends. Not having enough healthy backends\\n\"\n \" means that clients of that service will get 503s (http) or connection refused\\n\"\n \" (tcp) when trying to connect to it.\\n\"\n \"\\n\"\n \"Reasons this might be happening:\\n\"\n \"\\n\"\n \" The service may simply not have enough copies or it could simply be\\n\"\n \" unhealthy in that location. There also may not be enough resources\\n\"\n \" in the cluster to support the requested instance count.\\n\"\n \"\\n\"\n \"Things you can do:\\n\"\n \"\\n\"\n \" * You can view the logs for the job with:\\n\"\n \" paasta logs -s %(service)s -i %(instance)s -c %(cluster)s\\n\"\n \"\\n\"\n \" * Fix the cause of the unhealthy service. 
Try running:\\n\"\n \"\\n\"\n \" paasta status -s %(service)s -i %(instance)s -c %(cluster)s -vv\\n\"\n \"\\n\"\n \" * Widen SmartStack discovery settings\\n\"\n \" * Increase the instance count\\n\"\n \"\\n\"\n ) % {\n 'service': instance_config.service,\n 'instance': instance_config.instance,\n 'cluster': instance_config.cluster,\n }\n log.error(output)\n else:\n status = pysensu_yelp.Status.OK\n log.info(output)\n send_event(instance_config=instance_config, status=status, output=output)", "def check(key):\n instance = key.get()\n if not instance:\n logging.warning('Instance does not exist: %s', key)\n return\n\n if not instance.active_metadata_update:\n logging.warning('Instance active metadata operation unspecified: %s', key)\n return\n\n if not instance.active_metadata_update.url:\n logging.warning(\n 'Instance active metadata operation URL unspecified: %s', key)\n return\n\n now = utils.utcnow()\n result = net.json_request(\n instance.active_metadata_update.url, scopes=gce.AUTH_SCOPES)\n if result['status'] != 'DONE':\n return\n\n if result.get('error'):\n logging.warning(\n 'Instance metadata operation failed: %s\\n%s',\n key,\n json.dumps(result, indent=2),\n )\n metrics.instance_set_metadata_time.add(\n (now - instance.active_metadata_update.operation_ts).total_seconds(),\n fields={\n 'success': False,\n 'zone': instance.instance_group_manager.id(),\n },\n )\n metrics.send_machine_event('METADATA_UPDATE_FAILED', instance.hostname)\n reschedule_active_metadata_update(key, instance.active_metadata_update.url)\n metrics.send_machine_event('METADATA_UPDATE_READY', instance.hostname)\n else:\n metrics.instance_set_metadata_time.add(\n (now - instance.active_metadata_update.operation_ts).total_seconds(),\n fields={\n 'success': True,\n 'zone': instance.instance_group_manager.id(),\n },\n )\n metrics.send_machine_event('METADATA_UPDATE_SUCCEEDED', instance.hostname)\n clear_active_metadata_update(key, instance.active_metadata_update.url)", "def check_status():\n js = _get_jetstream_conn()\n i = js.compute.instances.get(session.attributes.get('instance_id'))\n if not i:\n return question(\"There was a problem. 
Please retry your command.\")\n\n status = i.state\n if session.attributes['status'] != status:\n msg = \"New instance status is {0}.\".format(status)\n if not session.attributes['public_ip'] and status == 'running':\n # Attach a floating IP to the instance\n fip = None\n fips = js.network.floating_ips()\n for ip in fips:\n if not ip.in_use():\n fip = ip\n if fip:\n i.add_floating_ip(fip.public_ip)\n session.attributes['public_ip'] = fip.public_ip\n else:\n msg = \"Instance status is {0}\".format(status)\n\n session.attributes['status'] = status\n\n if session.attributes['status'] != 'running':\n q = \"Would you like to check the status again?\"\n return question(msg + q).reprompt(q)\n else:\n card_content = 'Access your instance at http://{0}'.format(\n session.attributes.get('public_ip'))\n return statement(msg).simple_card(\n title=\"Instance {0} was launched.\".format(i.name),\n content=msg + card_content)", "def instanceha_deployed():\n if overcloud.has_overcloud():\n return get_overcloud_nodes_running_pcs_resource(\n resource='nova-evacuate')\n else:\n return False", "def test_instance_not_overscaled(self) -> None:\n if self.prod_env:\n ec2_name = 'saints-xctf-server-prod-asg'\n else:\n ec2_name = 'saints-xctf-server-dev-asg'\n\n instances = self.get_ec2(ec2_name)\n self.assertTrue(len(instances) < 3)", "def instances_availability(self, lastsubmitedinstance, metrics):\n connection = self.connection\n instancesconfig = self.instancesconfigs\n\n cur = connection.cursor()\n harvesters = instancesconfig.keys()\n connection.row_factory = sqlite3.Row\n\n for harvesterid in harvesters:\n error_text = set()\n\n instanceisenable = self.__str_to_bool(instancesconfig[harvesterid]['instanceisenable'])\n del instancesconfig[harvesterid]['instanceisenable']\n ### Instance is enable ###\n if instanceisenable:\n for host in instancesconfig[harvesterid].keys():\n avaibility = []\n if self.__str_to_bool(instancesconfig[harvesterid][host]['hostisenable']):\n ### No submitted worker ###\n timedelta_submitted = timedelta(minutes=30)\n if host != 'none' and host in instancesconfig[harvesterid] \\\n and self.__str_to_bool( instancesconfig[harvesterid][host]['metrics']['lastsubmittedworker']['enable']):\n timedelta_submitted = self.__get_timedelta(\n instancesconfig[harvesterid][host]['metrics']['lastsubmittedworker']['value'])\n if lastsubmitedinstance[harvesterid]['harvesterhost'][host][\n 'harvesterhostmaxtime'] < datetime.utcnow() - timedelta_submitted:\n error = \"Last submitted worker was {0}\".format(\n str(lastsubmitedinstance[harvesterid]['harvesterhost'][host][\n 'harvesterhostmaxtime'])) + '\\n'\n error_text.add(error)\n avaibility.append(0)\n if harvesterid in metrics:\n ### No heartbeat ###\n heartbeattime = metrics[harvesterid][host].keys()[0]\n contacts = instancesconfig[harvesterid][host]['contacts']\n timedelta_heartbeat = self.__get_timedelta(instancesconfig[harvesterid][host]['metrics']['lastheartbeat']['value'])\n if self.__str_to_bool( instancesconfig[harvesterid][host]['metrics']['lastheartbeat']['enable']) and \\\n heartbeattime < datetime.utcnow() - timedelta_heartbeat:\n error = \"Last heartbeat was {0}\".format(\n str(heartbeattime)) + '\\n'\n error_text.add(error)\n avaibility.append(0)\n\n #### Metrics ####\n memory = instancesconfig[harvesterid][host]['memory']\n cpu_warning = instancesconfig[harvesterid][host]['metrics']['cpu']['cpu_warning']\n cpu_critical = instancesconfig[harvesterid][host]['metrics']['cpu']['cpu_critical']\n disk_warning = 
instancesconfig[harvesterid][host]['metrics']['disk']['disk_warning']\n disk_critical = instancesconfig[harvesterid][host]['metrics']['disk']['disk_critical']\n memory_warning = instancesconfig[harvesterid][host]['metrics']['memory']['memory_warning']\n memory_critical = instancesconfig[harvesterid][host]['metrics']['memory']['memory_critical']\n\n cpu_enable = self.__str_to_bool(\n instancesconfig[harvesterid][host]['metrics']['cpu']['enable'])\n disk_enable = self.__str_to_bool(\n instancesconfig[harvesterid][host]['metrics']['disk']['enable'])\n memory_enable = self.__str_to_bool(\n instancesconfig[harvesterid][host]['metrics']['memory']['enable'])\n\n #### Metrics DB ####\n for metric in metrics[harvesterid][host][heartbeattime]:\n #### CPU ####\n if cpu_enable:\n cpu_pc = int(metric['cpu_pc'])\n if cpu_pc >= cpu_warning:\n avaibility.append(50)\n error = \"Warning! CPU utilization: {0}\".format(\n str(cpu_pc)) + '\\n'\n error_text.add(error)\n elif cpu_pc >= cpu_critical:\n avaibility.append(10)\n error = \"CPU utilization: {0}\".format(\n str(cpu_pc)) + '\\n'\n error_text.add(error)\n #### Memory ####\n if memory_enable:\n if 'memory_pc' in metric:\n memory_pc = int(metric['memory_pc'])\n else:\n memory_pc = int(self.__get_change(metric['rss_mib'], memory))\n if memory_pc >= memory_warning:\n avaibility.append(50)\n error = \"Warning! Memory consumption: {0}\".format(\n str(memory_pc)) + '\\n'\n error_text.add(error)\n elif memory_pc >= memory_critical:\n avaibility.append(0)\n error = \"Memory consumption: {0}\".format(\n str(memory_pc)) + '\\n'\n error_text.add(error)\n #### HDD&HDD1 ####\n if disk_enable:\n if 'volume_data_pc' in metric:\n volume_data_pc = int(metric['volume_data_pc'])\n else:\n volume_data_pc = -1\n if volume_data_pc >= disk_warning:\n avaibility.append(50)\n error = \"Warning! Disk utilization: {0}\".format(\n str(volume_data_pc)) + '\\n'\n error_text.add(error)\n elif volume_data_pc >= disk_critical:\n avaibility.append(10)\n error = \"Disk utilization: {0}\".format(\n str(volume_data_pc)) + '\\n'\n error_text.add(error)\n if 'volume_data1_pc' in metric:\n volume_data1_pc = int(metric['volume_data1_pc'])\n if volume_data1_pc >= disk_warning:\n avaibility.append(50)\n error = \"Warning! 
Disk 1 utilization: {0}\".format(\n str(volume_data1_pc)) + '\\n'\n error_text.add(error)\n elif volume_data1_pc >= disk_critical:\n avaibility.append(10)\n error = \"Disk 1 utilization: {0}\".format(\n str(volume_data1_pc)) + '\\n'\n error_text.add(error)\n try:\n cur.execute(\"insert into INSTANCES values (?,?,?,?,?,?,?,?,?)\",\n (str(harvesterid), str(host),\n str(lastsubmitedinstance[harvesterid]['harvesterhost'][host]['harvesterhostmaxtime']),\n heartbeattime, 1, 0, min(avaibility) if len(avaibility) > 0 else 100, str(contacts), ', '.join(str(e) for e in error_text)))\n connection.commit()\n error_text = set()\n except:\n query = \\\n \"\"\"UPDATE INSTANCES \n SET lastsubmitted = '{0}', active = {1}, availability = {2}, lastheartbeat = '{3}', contacts = '{4}', errorsdesc = '{5}'\n WHERE harvesterid = '{6}' and harvesterhost = '{7}'\n \"\"\".format(str(lastsubmitedinstance[harvesterid]['harvesterhost'][host]['harvesterhostmaxtime']),\n 1, min(avaibility) if len(avaibility) > 0 else 100, heartbeattime, str(contacts), ', '.join(str(e) for e in error_text), str(harvesterid),\n str(host))\n cur.execute(query)\n connection.commit()\n error_text = set()\n else:\n cur.execute(\"DELETE FROM INSTANCES WHERE harvesterid = ?\", [str(harvesterid)])\n connection.commit()", "def run():\r\n if args.action == \"create\":\r\n if args.customer_id and args.node_type:\r\n my_aws = createaws()\r\n node_id = my_aws.create_ec2_instance(args.customer_id, args.node_type)\r\n print(\"Node Created: \", node_id, \"\\n\")\r\n return True\r\n else:\r\n print(\"Missed command parameters for Instance Creation\")\r\n return False\r\n\r\n elif args.action == \"list-nodes\":\r\n if args.customer_id:\r\n my_aws = createaws()\r\n instance_lst = my_aws.get_instance_by_customer_id(args.customer_id)\r\n print(\"Customer\", args.customer_id, \"has \" + str(len(instance_lst)) + \" Instances: \", \",\".join(instance_lst)\r\n ,\"\\n\")\r\n return True\r\n else:\r\n print(\"Missed command parameters for Instance Listing\")\r\n return False\r\n\r\n elif args.action == \"list-all\":\r\n my_aws = createaws()\r\n cust_inst_ip = my_aws.get_all_instances()\r\n print(\"All the Instances: customer_id, instance_id, instance_ip formatted\\n\")\r\n if len(cust_inst_ip) > 0:\r\n for rec in cust_inst_ip:\r\n print(', '.join(rec))\r\n else:\r\n print(\"No Instances!\")\r\n return False\r\n return True\r\n\r\n elif args.action == \"execute\":\r\n instance_ids, succ_id_list, not_worked_is_list, outs = [], [], [], []\r\n if args.script and (args.customer_id or args.node_type):\r\n my_aws = createaws()\r\n commands = args.script\r\n if args.customer_id:\r\n instance_ids.extend(my_aws.get_instance_by_customer_id(args.customer_id))\r\n if args.node_type:\r\n instance_ids.extend(my_aws.get_instance_by_node_type(args.node_type))\r\n instance_ids = list(set(instance_ids))\r\n\r\n succ_id_list, not_worked_is_list, outs = \\\r\n my_aws.execute_commands_on_linux_instances(commands, instance_ids)\r\n print(\"\\nInstances that run the commands:\\n\", '\\n '.join(succ_id_list))\r\n print(\"\\nInstances that don't run the commands: (Instance is not running or its SSM agent doesn't work or \"\r\n \"command couldn't be executed\\n\", '\\n '.join(not_worked_is_list))\r\n print(\"\\nOutputs of the Instances that run the commands:\")\r\n for i in outs:\r\n print(\"\\n\")\r\n for k, v in dict(i).items():\r\n print(str(k).lstrip(), \"-->\", str(v).replace('\\n', \"\"))\r\n return True\r\n else:\r\n print(\"Missed command parameters for Execution on Instance\")\r\n 
return False\r\n\r\n elif args.action == \"backup\":\r\n if args.node_id:\r\n my_aws = createaws()\r\n s_id = my_aws.make_backup(args.node_id)\r\n print(s_id)\r\n else:\r\n return False\r\n\r\n elif args.action == \"list-backups\":\r\n if args.node_id:\r\n my_aws = createaws()\r\n backup_list = my_aws.list_backups(args.node_id)\r\n if len(backup_list) > 0:\r\n for rec in backup_list:\r\n print(', '.join(rec))\r\n return True\r\n else:\r\n print(\"Snapshot yok !\")\r\n return True\r\n else:\r\n return False\r\n\r\n elif args.action == \"roll-back\":\r\n if args.backup_id:\r\n my_aws = createaws()\r\n my_aws.roll_back(args.backup_id, args.node_id)\r\n elif args.action == \"terminate-all\":\r\n my_aws = createaws()\r\n my_aws.terminate_instances('ALL')\r\n else:\r\n print(\"Please select a proper action\")", "def allowed_instances(context, requested_instances, instance_type):\n project_id = context.project_id\n context = context.elevated()\n requested_cores = requested_instances * instance_type['vcpus']\n requested_ram = requested_instances * instance_type['memory_mb']\n usage = db.instance_data_get_for_project(context, project_id)\n used_instances, used_cores, used_ram = usage\n quota = get_project_quotas(context, project_id)\n allowed_instances = _get_request_allotment(requested_instances,\n used_instances,\n quota['instances'])\n allowed_cores = _get_request_allotment(requested_cores, used_cores,\n quota['cores'])\n allowed_ram = _get_request_allotment(requested_ram, used_ram, quota['ram'])\n allowed_instances = min(allowed_instances,\n allowed_cores // instance_type['vcpus'],\n allowed_ram // instance_type['memory_mb'])\n return min(requested_instances, allowed_instances)", "def test_create_cluster_resource_quota(self):\n pass", "def check_available(rs):\n info = rs.get_cluster_info()\n if info['ClusterStatus'] == 'available':\n return True\n elif info['ClusterStatus'] == 'deleting':\n raise AttributeError(f'Cluster is currently deleting. Please wait and try again.\\n{info}')\n elif info['ClusterStatus'] == 'creating': \n return False\n \n raise NameError(f'Could not get cluster status information. Current information about the cluster: \\n{info}')", "def test_patch_cluster_resource_quota_status(self):\n pass", "def check_autoscaling_group_health(asg_name, current_capacity_count):\n if_verbose(\"Checking the health of ASG %s\" % asg_name)\n timer = time.time()\n while(True):\n if_verbose(\"Sleeping for %d seconds whilst waiting for ASG health\" % args.update_timeout)\n time.sleep(args.update_timeout)\n\n if int(time.time() - timer) >= args.health_check_timeout:\n return \"Health check timer expired on asg_instances count. 
A manual clean up is likely.\"\n\n completed_instances = 0\n asg_instances = asg.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name], MaxRecords=1)[\"AutoScalingGroups\"][0][\"Instances\"]\n\n while(len(asg_instances) != current_capacity_count):\n if_verbose(\"Waiting for all of %s's instances (%d) to appear healthy\" % (asg_name, args.instance_count_step))\n time.sleep(args.update_timeout)\n asg_instances = asg.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name], MaxRecords=1)[\"AutoScalingGroups\"][0][\"Instances\"]\n\n for instance in asg_instances:\n if_verbose(\"Progress of ASG instance %s: %s\" % (instance[\"InstanceId\"], instance[\"LifecycleState\"]))\n\n if instance[\"LifecycleState\"] == \"InService\":\n completed_instances += 1\n\n if completed_instances >= len(asg_instances):\n if_verbose(\"We have %d healthy nodes and we wanted %d - moving on.\" % (completed_instances, len(asg_instances)))\n break\n else:\n completed_instances = 0\n\n if_verbose(\"ASG %s is healthy\" % asg_name)\n return None", "def test_replace_cluster_resource_quota_status(self):\n pass", "def test_update_instance_limit(self):\n pass", "def test_update_instance_limit1(self):\n pass", "def test_read_cluster_resource_quota_status(self):\n pass", "def check_number_of_instances(self):\r\n\r\n if RecomendationDBManagement.management_instances_created != 0:\r\n raise ValueError(\"There can only be one database manager\")\r\n else:\r\n RecomendationDBManagement.management_instances_created = RecomendationDBManagement.management_instances_created + 1", "def test_non_additive_instance_creation(self):\n\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import launch_instances, run_block_device_dict, farm_security_group_setup\n\n # launch_instances requires vpc setup as done by firesim/scripts/setup_firesim.py\n from awstools.aws_setup import aws_setup\n aws_setup()\n farm_security_group_setup()\n\n type = 'f1.2xlarge'\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags = {'fsimcluster': 'testcluster'},\n always_expand=False)\n instances.should.have.length_of(1)\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags = {'fsimcluster': 'testcluster'},\n always_expand=False)\n instances.should.have.length_of(1)\n\n # There should be one instance total now, across one reservation\n ec2_client = boto3.client('ec2')\n paginator = ec2_client.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n all_reservations.should.have.length_of(1)\n\n [i for r in all_reservations for i in r['Instances']].should.have.length_of(1)", "def test_patch_cluster_resource_quota(self):\n pass", "def dvs_instances_batch_mix_sg(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n self.show_step(2)\n net_1 = os_conn.create_network(\n 
network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network is created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(3)\n self.show_step(4)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n self.icmp[\"security_group_rule\"][\"security_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"remote_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n self.icmp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n\n for sg in [sg1, sg2]:\n self.tcp[\"security_group_rule\"][\"security_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"remote_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n self.tcp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n\n # Add rules for ssh and ping\n os_conn.goodbye_security()\n _s_groups = os_conn.neutron.list_security_groups()['security_groups']\n _srv_tenant_id = os_conn.get_tenant(SERVTEST_TENANT).id\n default_sg = [sg for sg in _s_groups\n if sg['tenant_id'] == _srv_tenant_id and\n sg['name'] == 'default'][0]\n\n for step in (5, 9):\n self.show_step(step) # step 5, 9\n step += 1\n self.show_step(step) # step 6, 10\n step += 1\n\n openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=vm_count,\n security_groups=[sg1.name, sg2.name, default_sg['name']])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(step) # step 7, 11\n step += 1\n\n instances = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, instances)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair,\n timeout=60 * 5,\n interval=10)\n\n self.show_step(step) # step 8, 12\n step += 1\n\n for instance in instances:\n os_conn.nova.servers.delete(instance)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(instance)", "def test_replace_cluster_resource_quota(self):\n pass", "def _Exists(self, instance_only: bool = False) -> bool:\n cmd = util.GcloudCommand(self, 'spanner', 'instances', 'describe',\n self.name)\n\n # Do not log error or warning when checking existence.\n _, _, retcode = cmd.Issue(suppress_warning=True, raise_on_failure=False)\n if retcode != 0:\n logging.info('Could not find GCP Spanner instance %s.', self.name)\n return False\n\n if instance_only:\n return True\n\n cmd = util.GcloudCommand(self, 'spanner', 'databases', 'describe',\n self.database)\n cmd.flags['instance'] = self.name\n\n # Do not log 
error or warning when checking existence.\n _, _, retcode = cmd.Issue(suppress_warning=True, raise_on_failure=False)\n if retcode != 0:\n logging.info('Could not find GCP Spanner database %s.', self.database)\n return False\n\n return True", "def test_redis_increase_replica_count_usual_case():", "def scale_up(nvms, valid_hostnames=None, vms_allegedly_running=0):\n\n global api, img, flavor, user_data, network, owned_instances\n\n # Try to get image if necessary\n if img is None:\n img = image(cf['api']['image_id'])\n if img is None:\n logging.error(\"Cannot scale up: image id %s not found\" % image(cf['api']['image_id']))\n return []\n\n n_succ = 0\n n_fail = 0\n logging.info(\"We need %d more VMs...\" % nvms)\n\n inst = running_instances(valid_hostnames)\n if inst is None:\n logging.error(\"No list of instances can be retrieved from API\")\n return []\n\n n_running_vms = len(inst) + vms_allegedly_running # number of *total* VMs running (also the ones *not* owned by HTCondor)\n if cf['quota']['max_vms'] >= 1:\n # We have a \"soft\" quota: respect it\n n_vms_to_start = int(min(nvms, cf['quota']['max_vms']-n_running_vms))\n if n_vms_to_start <= 0:\n logging.warning(\"Over quota (%d VMs already running out of %d): cannot launch any more VMs\" % \\\n (n_running_vms,cf['quota']['max_vms']))\n else:\n logging.warning(\"Quota enabled: requesting %d (out of desired %d) VMs\" % (n_vms_to_start,nvms))\n else:\n n_vms_to_start = int(nvms)\n\n # Launch VMs\n inst_ok = []\n for i in range(1, n_vms_to_start+1):\n\n success = False\n if int(cf['debug']['dry_run_boot_vms']) == 0:\n try:\n # Returns the reservation\n new_inst_id = img.run(\n token_id=api.keystone.token_id,\n key_name=cf['api']['key_name'],\n user_data=user_data,\n instance_type=flavor.id,\n network=network.id\n )\n\n # Get the single instance ID from the reservation\n owned_instances.append( new_inst_id )\n inst_ok.append( new_inst_id )\n\n success = True\n except Exception:\n logging.error(\"Cannot run instance via API: check your \\\"hard\\\" quota\")\n\n else:\n logging.info(\"Not running VM: dry run active\")\n success = True\n\n if success:\n n_succ+=1\n logging.info(\"VM launched OK. Requested: %d/%d | Success: %d | Failed: %d | ID: %s\" % \\\n (i, n_vms_to_start, n_succ, n_fail, new_inst_id))\n else:\n n_fail+=1\n logging.info(\"VM launch fail. 
Requested: %d/%d | Success: %d | Failed: %d\" % \\\n (i, n_vms_to_start, n_succ, n_fail))\n\n # Dump owned instances to file (if something changed)\n if n_succ > 0:\n save_owned_instances()\n\n return inst_ok", "def ec2_status(resource, metadata, return_count=False):\n\n instances = resource.instances.filter(\n Filters=[{'Name': 'tag:Name', 'Values': [metadata['fqdn']]},\n {'Name': 'instance-state-name', 'Values': ['pending', 'running']}, ])\n\n # get a count of the instances and then either return count or print results\n count = sum(1 for _ in instances)\n if return_count:\n # return count for conditional consumption in other functions\n return count\n else:\n # print for human consumption\n if count == 0:\n print(\"No instances running\")\n else:\n print(count, \"instances running\")\n print('{:20} {:15} {:22} {:18} {}'.format(\n 'instance_id', 'state', 'instance_name', 'public_ip_address', 'instance_role'))\n for instance in instances:\n # tags order does not deterministically stay from run to run and stored as list of dicts\n # tags = {instance.tags[0]['Key']: instance.tags[0]['Value'],\n # instance.tags[1]['Key']: instance.tags[1]['Value']}\n # probably there is a much better way to map this but let's make it a dict of tags\n tags = {}\n for tag in instance.tags:\n tags[tag['Key']] = tag['Value']\n\n print('{:20} {:15} {:22} {:18} {}'.format(\n instance.id, instance.state['Name'], tags['Name'],\n instance.public_ip_address, tags['Role']))", "def test_read_cluster_resource_quota(self):\n pass", "def pre_upgrade_checks(self):\n\n #HostOverview\n Logger.info(\"******************************************************************************************************************************************************\")\n Logger.info(\"\\t\\t\\t\\t\\t\\t\\tHOST OVERVIEW\")\n Logger.info(\"******************************************************************************************************************************************************\")\n print (\"\\n\")\n Logger.info(\"Ambari version\\t\\t:{0}\".format(self.ambari_version))\n\n #Check OS\n os = platform.dist()\n if os[1] != None:\n Logger.info(\"Operating System\\t\\t:{0} {1} - {2}\".format(os[0],os[1],os[2]))\n else:\n Logger.error(\"Unable to fetch OS details.\")\n self.terminate()\n return\n\n self.check_java_version()\n self.check_exactly_one_current_version()\n\n\n #Check if rack awareness is enabled ?\n rack_awareness = \"SELECT DISTINCT rack_info FROM hosts WHERE rack_info!='/default-rack';\"\n self.cursor.execute(rack_awareness)\n result = self.cursor.fetchone()\n if result is None or len(result) != 1:\n Logger.info(\"Rack Awareness ?\\t\\tNo\\n\")\n else:\n Logger.info(\"Rack Awareness ?\\t\\tYes\\n\")\n\n #Security Overview\n self.check_security()\n\n #Check High Availability configuration\n self.check_high_availability()\n\n #Check Metastores\n self.check_metastore()", "def _check_upgrade(dbapi, host_obj=None):\n is_upgrading, upgrade = is_upgrade_in_progress(dbapi)\n if is_upgrading:\n if host_obj:\n if host_obj.created_at > upgrade.created_at:\n LOG.info(\"New host %s created after upgrade, allow partition\" %\n host_obj.hostname)\n return\n\n raise wsme.exc.ClientSideError(\n _(\"ERROR: Disk partition operations are not allowed during a \"\n \"software upgrade. 
Try again after the upgrade is completed.\"))", "def test_eks_worker_node_managed_by_eks(self) -> None:\n response = self.ec2.describe_instances(Filters=[\n {\n 'Name': 'tag:Name',\n 'Values': ['eks-prod']\n }\n ])\n worker_instances = response.get('Reservations')[0].get('Instances')\n self.assertEqual(1, len(worker_instances))", "def check_owned_instance(st, instance_id):\n\n logging.info(\"Checking owned instance %s...\" % instance_id)\n\n global api, owned_instances\n\n # Get information from API: we need the IP address\n inst = api.nova.get_instance(token_id=api.keystone.token_id, instance_id=instance_id)\n\n # Check if the instance is in the list (using cached status)\n found = False\n for h in st['workers_status'].keys():\n if gethostbyname(h) == inst.network_ip(network_name=cf[\"api\"][\"network_name\"]):\n found = True\n break\n\n # Deal with errors\n if not found:\n logging.error(\"Instance %s (with IP %s) has not joined the cluster after %ds: terminating it\" % (instance_id, inst.private_ip_address, cf['elastiq']['estimated_vm_deploy_time_s']))\n\n try:\n inst.terminate(token_id=api.keystone.token_id)\n owned_instances.remove(instance_id)\n save_owned_instances()\n logging.info(\"Forcing shutdown of %s: OK\" % instance_id)\n except Exception as e:\n # Recheck in a while (10s) in case termination fails\n logging.error(\"Forcing shutdown of %s failed: rescheduling check\" % instance_id)\n return {\n 'action': 'check_owned_instance',\n 'when': time.time() + 10,\n 'params': [ instance_id ]\n }\n\n else:\n logging.debug(\"Instance %s (with IP %s) successfully joined the cluster within %ds\" % (instance_id, inst.network_ip(network_name=cf[\"api\"][\"network_name\"]), cf['elastiq']['estimated_vm_deploy_time_s']))\n\n return", "def check_load(cursor):\n cursor.execute(\"\"\"\n select pid from pg_stat_activity where query ~* 'FETCH'\n and datname = 'asos'\"\"\")\n if cursor.rowcount > 9:\n sys.stderr.write((\"/cgi-bin/request/metars.py over capacity: %s\"\n ) % (cursor.rowcount,))\n ssw(\"Content-type: text/plain\\n\")\n ssw('Status: 503 Service Unavailable\\n\\n')\n ssw(\"ERROR: server over capacity, please try later\")\n sys.exit(0)", "def test_healthcheck_galera_cluster(host):\n\n sql_query = (\"show status where Variable_name like 'wsrep_clu%'\"\n \"or Variable_name like 'wsrep_local_state%';\")\n mysql_cmd = 'mysql -h localhost -e \"{0}\"'.format(sql_query)\n\n cmd = \"{} {}\".format(galera_container, mysql_cmd)\n\n output = host.run(cmd)\n verify_items = ['wsrep_cluster_conf_id',\n 'wsrep_cluster_size',\n 'wsrep_cluster_state_uuid',\n 'wsrep_cluster_status',\n 'wsrep_local_state_uuid']\n\n for item in verify_items:\n assert item in output.stdout", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n backend_type: Optional[pulumi.Input['InstanceBackendType']] = None,\n connection_name: Optional[pulumi.Input[str]] = None,\n current_disk_size: Optional[pulumi.Input[str]] = None,\n database_version: Optional[pulumi.Input['InstanceDatabaseVersion']] = None,\n disk_encryption_configuration: Optional[pulumi.Input[pulumi.InputType['DiskEncryptionConfigurationArgs']]] = None,\n disk_encryption_status: Optional[pulumi.Input[pulumi.InputType['DiskEncryptionStatusArgs']]] = None,\n etag: Optional[pulumi.Input[str]] = None,\n failover_replica: Optional[pulumi.Input[pulumi.InputType['InstanceFailoverReplicaArgs']]] = None,\n gce_zone: Optional[pulumi.Input[str]] = None,\n instance_type: Optional[pulumi.Input['InstanceInstanceType']] = None,\n 
ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpMappingArgs']]]]] = None,\n ipv6_address: Optional[pulumi.Input[str]] = None,\n kind: Optional[pulumi.Input[str]] = None,\n maintenance_version: Optional[pulumi.Input[str]] = None,\n master_instance_name: Optional[pulumi.Input[str]] = None,\n max_disk_size: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n on_premises_configuration: Optional[pulumi.Input[pulumi.InputType['OnPremisesConfigurationArgs']]] = None,\n out_of_disk_report: Optional[pulumi.Input[pulumi.InputType['SqlOutOfDiskReportArgs']]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n replica_configuration: Optional[pulumi.Input[pulumi.InputType['ReplicaConfigurationArgs']]] = None,\n replica_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n root_password: Optional[pulumi.Input[str]] = None,\n satisfies_pzs: Optional[pulumi.Input[bool]] = None,\n scheduled_maintenance: Optional[pulumi.Input[pulumi.InputType['SqlScheduledMaintenanceArgs']]] = None,\n secondary_gce_zone: Optional[pulumi.Input[str]] = None,\n self_link: Optional[pulumi.Input[str]] = None,\n server_ca_cert: Optional[pulumi.Input[pulumi.InputType['SslCertArgs']]] = None,\n service_account_email_address: Optional[pulumi.Input[str]] = None,\n settings: Optional[pulumi.Input[pulumi.InputType['SettingsArgs']]] = None,\n state: Optional[pulumi.Input['InstanceState']] = None,\n suspension_reason: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceSuspensionReasonItem']]]] = None,\n __props__=None):\n ...", "def quick_instance(self, name, image, instance_type, env_tag='dev', zone_tag='starwatts', os_tag='debian', sg_id=None,\n private=True, extra_sg_ids=None, extra_tags=None, terminate_on_shutdown=False,\n debug=False):\n # Debug setting\n if debug:\n logging.basicConfig(level=logging.DEBUG)\n\n # Preliminary tests\n try:\n ami = self.get_image(image_id=image)\n except EC2ResponseError:\n logging.error(\"The image {} could not be found. Aborting.\".format(image))\n return\n print(\"Using AMI {} : {}\".format(image, ami.name))\n if len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'running'})) > 0 or \\\n len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'stopped'})) > 0:\n logging.error(\"An instance with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No instance has the same 'name' tag.\")\n if self.keypair_exists(name):\n logging.error(\"A keypair with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No keypair was found with the same name.\")\n if sg_id is None:\n if self.security_group_exists(name=name):\n logging.error(\"A security group with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No security group was found with the same name.\")\n\n # Tags generation\n logging.debug(\"Generating tags to apply.\")\n tags = dict(name=name, os=os_tag, env=env_tag, zone=zone_tag, privacy='true' if private else 'false')\n if extra_tags is not None:\n tags.update(extra_tags)\n print(\"Tags : {}\".format(tags))\n\n # Fetching needed security groups (bastion and zabbix)\n standard_sg = self.get_all_security_groups(groupnames=['standard'])\n if len(standard_sg) != 1:\n logging.error(\"Multiple or no security group was found for the 'bastion' search. 
Aborting.\")\n return\n standard_sg = standard_sg[0]\n logging.debug(\"The following security group was found for 'standard : {} {}\".format(standard_sg.id,\n standard_sg.description))\n\n # Security group creation\n if sg_id is None:\n sg = self.create_security_group(name, \"SG applied to {} VM\".format(name))\n sg_id = sg.id\n\n sg_ids = [sg_id, standard_sg.id, ]\n # Using the extra security groups if any\n if extra_sg_ids is not None:\n logging.debug(\"Extra security groups to add : {}\".format(extra_sg_ids))\n sg_ids.extend(extra_sg_ids)\n logging.debug(\"Security Groups : {}\".format(sg_ids))\n\n user_data = \"-----BEGIN OUTSCALE SECTION-----\\nprivate_only=true\\n-----END OUTSCALE SECTION-----\" if private else \"\"\n logging.debug(\"Creating keypair.\")\n kp = self.create_key_pair(key_name=name)\n fp = os.path.join(os.path.expanduser('~/.ssh'), '%s.pem' % kp.name)\n with open(fp, 'wb') as fd:\n fd.write(bytes(kp.material, \"UTF-8\"))\n logging.debug(\"Keypair written to ~/.ssh/{}.pem\".format(name))\n\n resa = self.run_instances(image_id=image, key_name=name, security_groups=sg_ids, instance_type=instance_type,\n user_data=user_data,\n instance_initiated_shutdown_behavior='terminate' if terminate_on_shutdown else 'stop')\n inst = resa.instances[0]\n logging.debug(\"Adding tags to the newly created machine.\")\n inst.add_tags(tags)\n return inst", "def check(self, instance):\n\n def total_seconds(td):\n \"\"\"\n Returns total seconds of a timedelta in a way that's safe for\n Python < 2.7\n \"\"\"\n if hasattr(td, 'total_seconds'):\n return td.total_seconds()\n else:\n return (\n lag.microseconds +\n (lag.seconds + lag.days * 24 * 3600) * 10**6\n ) / 10.0**6\n\n if 'server' not in instance:\n raise Exception(\"Missing 'server' in mongo config\")\n\n # x.509 authentication\n ssl_params = {\n 'ssl': instance.get('ssl', None),\n 'ssl_keyfile': instance.get('ssl_keyfile', None),\n 'ssl_certfile': instance.get('ssl_certfile', None),\n 'ssl_cert_reqs': instance.get('ssl_cert_reqs', None),\n 'ssl_ca_certs': instance.get('ssl_ca_certs', None)\n }\n\n for key, param in ssl_params.items():\n if param is None:\n del ssl_params[key]\n\n server = instance['server']\n username, password, db_name, nodelist, clean_server_name, auth_source = self._parse_uri(server, sanitize_username=bool(ssl_params))\n additional_metrics = instance.get('additional_metrics', [])\n\n # Get the list of metrics to collect\n collect_tcmalloc_metrics = 'tcmalloc' in additional_metrics\n metrics_to_collect = self._get_metrics_to_collect(\n server,\n additional_metrics\n )\n\n # Tagging\n tags = instance.get('tags', [])\n # ...de-dupe tags to avoid a memory leak\n tags = list(set(tags))\n\n if not db_name:\n self.log.info('No MongoDB database found in URI. 
Defaulting to admin.')\n db_name = 'admin'\n\n service_check_tags = [\n \"db:%s\" % db_name\n ]\n service_check_tags.extend(tags)\n\n # ...add the `server` tag to the metrics' tags only\n # (it's added in the backend for service checks)\n tags.append('server:%s' % clean_server_name)\n\n if nodelist:\n host = nodelist[0][0]\n port = nodelist[0][1]\n service_check_tags = service_check_tags + [\n \"host:%s\" % host,\n \"port:%s\" % port\n ]\n\n timeout = float(instance.get('timeout', DEFAULT_TIMEOUT)) * 1000\n try:\n cli = pymongo.mongo_client.MongoClient(\n server,\n socketTimeoutMS=timeout,\n connectTimeoutMS=timeout,\n serverSelectionTimeoutMS=timeout,\n read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED,\n **ssl_params)\n # some commands can only go against the admin DB\n admindb = cli['admin']\n db = cli[db_name]\n except Exception:\n self.service_check(\n self.SERVICE_CHECK_NAME,\n AgentCheck.CRITICAL,\n tags=service_check_tags)\n raise\n\n # Authenticate\n do_auth = True\n use_x509 = ssl_params and not password\n\n if not username:\n self.log.debug(\n u\"A username is required to authenticate to `%s`\", server\n )\n do_auth = False\n\n if do_auth:\n if auth_source:\n self.log.info(\"authSource was specified in the the server URL: using '%s' as the authentication database\", auth_source)\n self._authenticate(cli[auth_source], username, password, use_x509, clean_server_name, service_check_tags)\n else:\n self._authenticate(db, username, password, use_x509, clean_server_name, service_check_tags)\n\n try:\n status = db.command('serverStatus', tcmalloc=collect_tcmalloc_metrics)\n except Exception:\n self.service_check(\n self.SERVICE_CHECK_NAME,\n AgentCheck.CRITICAL,\n tags=service_check_tags)\n raise\n else:\n self.service_check(\n self.SERVICE_CHECK_NAME,\n AgentCheck.OK,\n tags=service_check_tags)\n\n if status['ok'] == 0:\n raise Exception(status['errmsg'].__str__())\n\n ops = db.current_op()\n status['fsyncLocked'] = 1 if ops.get('fsyncLock') else 0\n\n status['stats'] = db.command('dbstats')\n dbstats = {}\n dbstats[db_name] = {'stats': status['stats']}\n\n # Handle replica data, if any\n # See\n # http://www.mongodb.org/display/DOCS/Replica+Set+Commands#ReplicaSetCommands-replSetGetStatus # noqa\n try:\n data = {}\n dbnames = []\n\n replSet = admindb.command('replSetGetStatus')\n if replSet:\n primary = None\n current = None\n\n # need a new connection to deal with replica sets\n setname = replSet.get('set')\n cli_rs = pymongo.mongo_client.MongoClient(\n server,\n socketTimeoutMS=timeout,\n connectTimeoutMS=timeout,\n serverSelectionTimeoutMS=timeout,\n replicaset=setname,\n read_preference=pymongo.ReadPreference.NEAREST,\n **ssl_params)\n\n if do_auth:\n if auth_source:\n self._authenticate(cli_rs[auth_source], username, password, use_x509, server, service_check_tags)\n else:\n self._authenticate(cli_rs[db_name], username, password, use_x509, server, service_check_tags)\n\n # Replication set information\n replset_name = replSet['set']\n replset_state = self.get_state_name(replSet['myState']).lower()\n\n tags.extend([\n u\"replset_name:{0}\".format(replset_name),\n u\"replset_state:{0}\".format(replset_state),\n ])\n\n # Find nodes: master and current node (ourself)\n for member in replSet.get('members'):\n if member.get('self'):\n current = member\n if int(member.get('state')) == 1:\n primary = member\n\n # Compute a lag time\n if current is not None and primary is not None:\n if 'optimeDate' in primary and 'optimeDate' in current:\n lag = primary['optimeDate'] - 
current['optimeDate']\n data['replicationLag'] = total_seconds(lag)\n\n if current is not None:\n data['health'] = current['health']\n\n data['state'] = replSet['myState']\n\n if current is not None:\n total = 0.0\n cfg = cli_rs['local']['system.replset'].find_one()\n for member in cfg.get('members'):\n total += member.get('votes', 1)\n if member['_id'] == current['_id']:\n data['votes'] = member.get('votes', 1)\n data['voteFraction'] = data['votes'] / total\n\n status['replSet'] = data\n\n # Submit events\n self._report_replica_set_state(\n data['state'], clean_server_name, replset_name, self.agentConfig\n )\n\n except Exception as e:\n if \"OperationFailure\" in repr(e) and \"not running with --replSet\" in str(e):\n pass\n else:\n raise e\n\n # If these keys exist, remove them for now as they cannot be serialized\n try:\n status['backgroundFlushing'].pop('last_finished')\n except KeyError:\n pass\n try:\n status.pop('localTime')\n except KeyError:\n pass\n\n dbnames = cli.database_names()\n self.gauge('mongodb.dbs', len(dbnames), tags=tags)\n\n for db_n in dbnames:\n db_aux = cli[db_n]\n dbstats[db_n] = {'stats': db_aux.command('dbstats')}\n\n # Go through the metrics and save the values\n for metric_name in metrics_to_collect:\n # each metric is of the form: x.y.z with z optional\n # and can be found at status[x][y][z]\n value = status\n\n if metric_name.startswith('stats'):\n continue\n else:\n try:\n for c in metric_name.split(\".\"):\n value = value[c]\n except KeyError:\n continue\n\n # value is now status[x][y][z]\n if not isinstance(value, (int, long, float)):\n raise TypeError(\n u\"{0} value is a {1}, it should be an int, a float or a long instead.\"\n .format(metric_name, type(value)))\n\n # Submit the metric\n submit_method, metric_name_alias = self._resolve_metric(metric_name, metrics_to_collect)\n submit_method(self, metric_name_alias, value, tags=tags)\n\n for st, value in dbstats.iteritems():\n for metric_name in metrics_to_collect:\n if not metric_name.startswith('stats.'):\n continue\n\n try:\n val = value['stats'][metric_name.split('.')[1]]\n except KeyError:\n continue\n\n # value is now status[x][y][z]\n if not isinstance(val, (int, long, float)):\n raise TypeError(\n u\"{0} value is a {1}, it should be an int, a float or a long instead.\"\n .format(metric_name, type(val))\n )\n\n # Submit the metric\n metrics_tags = (\n tags +\n [\n u\"cluster:db:{0}\".format(st), # FIXME 6.0 - keep for backward compatibility\n u\"db:{0}\".format(st),\n ]\n )\n\n submit_method, metric_name_alias = \\\n self._resolve_metric(metric_name, metrics_to_collect)\n submit_method(self, metric_name_alias, val, tags=metrics_tags)\n\n if _is_affirmative(instance.get('collections_indexes_stats')):\n mongo_version = cli.server_info().get('version', '0.0')\n if LooseVersion(mongo_version) >= LooseVersion(\"3.2\"):\n self._collect_indexes_stats(instance, db, tags)\n else:\n self.log.error(\"'collections_indexes_stats' is only available starting from mongo 3.2: your mongo version is %s\", mongo_version)\n\n # Report the usage metrics for dbs/collections\n if 'top' in additional_metrics:\n try:\n dbtop = db.command('top')\n for ns, ns_metrics in dbtop['totals'].iteritems():\n if \".\" not in ns:\n continue\n\n # configure tags for db name and collection name\n dbname, collname = ns.split(\".\", 1)\n ns_tags = tags + [\"db:%s\" % dbname, \"collection:%s\" % collname]\n\n # iterate over DBTOP metrics\n for m in self.TOP_METRICS:\n # each metric is of the form: x.y.z with z optional\n # and can be found 
at ns_metrics[x][y][z]\n value = ns_metrics\n try:\n for c in m.split(\".\"):\n value = value[c]\n except Exception:\n continue\n\n # value is now status[x][y][z]\n if not isinstance(value, (int, long, float)):\n raise TypeError(\n u\"{0} value is a {1}, it should be an int, a float or a long instead.\"\n .format(m, type(value))\n )\n\n # Submit the metric\n submit_method, metric_name_alias = \\\n self._resolve_metric(m, metrics_to_collect, prefix=\"usage\")\n submit_method(self, metric_name_alias, value, tags=ns_tags)\n except Exception as e:\n self.log.warning('Failed to record `top` metrics %s' % str(e))\n\n\n if 'local' in dbnames: # it might not be if we are connectiing through mongos\n # Fetch information analogous to Mongo's db.getReplicationInfo()\n localdb = cli['local']\n\n oplog_data = {}\n\n for ol_collection_name in (\"oplog.rs\", \"oplog.$main\"):\n ol_options = localdb[ol_collection_name].options()\n if ol_options:\n break\n\n if ol_options:\n try:\n oplog_data['logSizeMB'] = round(\n ol_options['size'] / 2.0 ** 20, 2\n )\n\n oplog = localdb[ol_collection_name]\n\n oplog_data['usedSizeMB'] = round(\n localdb.command(\"collstats\", ol_collection_name)['size'] / 2.0 ** 20, 2\n )\n\n op_asc_cursor = oplog.find({\"ts\": {\"$exists\": 1}}).sort(\"$natural\", pymongo.ASCENDING).limit(1)\n op_dsc_cursor = oplog.find({\"ts\": {\"$exists\": 1}}).sort(\"$natural\", pymongo.DESCENDING).limit(1)\n\n try:\n first_timestamp = op_asc_cursor[0]['ts'].as_datetime()\n last_timestamp = op_dsc_cursor[0]['ts'].as_datetime()\n oplog_data['timeDiff'] = total_seconds(last_timestamp - first_timestamp)\n except (IndexError, KeyError):\n # if the oplog collection doesn't have any entries\n # if an object in the collection doesn't have a ts value, we ignore it\n pass\n except KeyError:\n # encountered an error trying to access options.size for the oplog collection\n self.log.warning(u\"Failed to record `ReplicationInfo` metrics.\")\n\n for (m, value) in oplog_data.iteritems():\n submit_method, metric_name_alias = \\\n self._resolve_metric('oplog.%s' % m, metrics_to_collect)\n submit_method(self, metric_name_alias, value, tags=tags)\n\n else:\n self.log.debug('\"local\" database not in dbnames. 
Not collecting ReplicationInfo metrics')\n\n # get collection level stats\n try:\n # Ensure that you're on the right db\n db = cli[db_name]\n # grab the collections from the configutation\n coll_names = instance.get('collections', [])\n # loop through the collections\n for coll_name in coll_names:\n # grab the stats from the collection\n stats = db.command(\"collstats\", coll_name)\n # loop through the metrics\n for m in self.collection_metrics_names:\n coll_tags = tags + [\"db:%s\" % db_name, \"collection:%s\" % coll_name]\n value = stats.get(m, None)\n if not value:\n continue\n\n # if it's the index sizes, then it's a dict.\n if m == 'indexSizes':\n submit_method, metric_name_alias = \\\n self._resolve_metric('collection.%s' % m, self.COLLECTION_METRICS)\n # loop through the indexes\n for (idx, val) in value.iteritems():\n # we tag the index\n idx_tags = coll_tags + [\"index:%s\" % idx]\n submit_method(self, metric_name_alias, val, tags=idx_tags)\n else:\n submit_method, metric_name_alias = \\\n self._resolve_metric('collection.%s' % m, self.COLLECTION_METRICS)\n submit_method(self, metric_name_alias, value, tags=coll_tags)\n except Exception as e:\n self.log.warning(u\"Failed to record `collection` metrics.\")\n self.log.exception(e)", "def reconfiguration_scalability(self):\n\n self.check_run('reconfiguration_scalability')\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"reconfigure_nova_ephemeral_disk\")\n\n self.show_step(2)\n cluster_id = self.fuel_web.get_last_created_cluster()\n config = utils.get_config_template('nova_disk')\n structured_config_nova = get_structured_config_dict(config)\n config = utils.get_config_template('keystone')\n structured_config_keystone = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n cluster_id,\n role='controller')\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n\n self.show_step(3)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role='controller')\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(4)\n self.check_config_on_remote(controllers, structured_config_keystone)\n\n self.show_step(5)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n time_expiration = config[\n 'keystone_config']['token/expiration']['value']\n self.check_token_expiration(os_conn, time_expiration)\n\n self.show_step(6)\n bs_nodes = [x for x in self.env.d_env.get_nodes()\n if x.name == 'slave-05' or x.name == 'slave-06']\n self.env.bootstrap_nodes(bs_nodes)\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-05': ['compute', 'cinder']})\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-06': ['controller']})\n\n self.show_step(7)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(8)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(9)\n self.fuel_web.run_ostf(cluster_id=cluster_id)\n\n self.show_step(10)\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['compute'])\n target_controller = [x for x in controllers\n if 'slave-06' in x['name']]\n target_compute = [x for x in computes\n if 'slave-05' in x['name']]\n self.check_config_on_remote(target_controller,\n structured_config_keystone)\n\n self.show_step(11)\n self.check_config_on_remote(target_compute, structured_config_nova)\n\n self.show_step(12)\n self.show_step(13)\n 
self.show_step(14)\n self.show_step(15)\n self.show_step(16)\n\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n hypervisor_name = target_compute[0]['fqdn']\n self.check_nova_ephemeral_disk(os_conn, cluster_id,\n hypervisor_name=hypervisor_name)\n\n self.show_step(17)\n self.check_token_expiration(os_conn, time_expiration)\n\n self.env.make_snapshot(\"reconfiguration_scalability\", is_make=True)", "def test_additive_instance_creation(self):\n\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import launch_instances, run_block_device_dict, farm_security_group_setup\n\n # launch_instances requires vpc setup as done by firesim/scripts/setup_firesim.py\n from awstools.aws_setup import aws_setup\n aws_setup()\n farm_security_group_setup()\n\n type = 'f1.2xlarge'\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n always_expand=True)\n instances.should.have.length_of(1)\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n always_expand=True)\n instances.should.have.length_of(1)\n\n # There should be two instances total now, across two reservations\n ec2_client = boto3.client('ec2')\n\n paginator = ec2_client.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n all_reservations.should.have.length_of(2)\n\n [i for r in all_reservations for i in r['Instances']].should.have.length_of(2)", "def scale(options):\n\n # ONLY GCE is supported for scaling at this time\n cluster = gce_cluster_control(options)\n if options.test_k8s:\n k8s = k8s_control_test(options)\n else:\n k8s = k8s_control(options)\n\n slack_logger.addHandler(slack_handler(options.slack_token))\n if not options.slack_token:\n scale_logger.info(\n \"No message will be sent to slack, since there is no token provided\")\n\n scale_logger.info(\"Scaling on cluster %s\", k8s.get_cluster_name())\n\n nodes = [] # a list of nodes that are NOT critical\n for node in k8s.nodes:\n if node.metadata.name not in k8s.critical_node_names:\n nodes.append(node)\n\n # Shuffle the node list so that when there are multiple nodes\n # with same number of pods, they will be randomly picked to\n # be made unschedulable\n random.shuffle(nodes)\n\n # goal is the total number of nodes we want in the cluster\n goal = schedule_goal(k8s, options)\n\n scale_logger.info(\"Total nodes in the cluster: %i\", len(k8s.nodes))\n scale_logger.info(\n \"%i nodes are unschedulable at this time\", k8s.get_num_schedulable())\n scale_logger.info(\"Found %i critical nodes\",\n len(k8s.nodes) - len(nodes))\n scale_logger.info(\"Recommending total %i nodes for service\", goal)\n\n if confirm((\"Updating unschedulable flags to ensure %i nodes are unschedulable\" % max(len(k8s.nodes) - goal, 0))):\n update_unschedulable(max(len(k8s.nodes) - goal, 0), nodes, k8s)\n\n if goal > len(k8s.nodes):\n scale_logger.info(\n \"Resize the cluster to %i nodes to satisfy the demand\", goal)\n if options.test_cloud:\n resize_for_new_nodes_test(goal, k8s, cluster)\n else:\n slack_logger.info(\n \"Cluster resized to %i nodes to satisfy the demand\", goal)\n 
resize_for_new_nodes(goal, k8s, cluster)\n if options.test_cloud:\n shutdown_empty_nodes_test(nodes, k8s, cluster)\n else:\n # CRITICAL NODES SHOULD NOT BE SHUTDOWN\n shutdown_empty_nodes(nodes, k8s, cluster)", "def check_if_cluster_was_upgraded():\n return True if \"replaces\" in get_ocs_csv().get().get(\"spec\") else False", "def dvs_instances_one_group(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(2)\n self.show_step(3)\n openstack.create_instances(os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n vm_count=vm_count,\n security_groups=[security_group.name])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(4)\n srv_list = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, srv_list)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair)\n\n self.show_step(5)\n for srv in srv_list:\n os_conn.nova.servers.delete(srv)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(srv)", "def modify_rds_security_group(payload):\n # version = payload.get(\"version\")\n rds_ids = payload.pop(\"resource_id\")\n sg_ids = payload.pop(\"sg_id\")\n apply_action = \"GrantSecurityGroup\"\n remove_action = \"RemoveSecurityGroup\"\n check_instance_security_action = \"DescribeSecurityGroupByInstance\"\n version = payload.get(\"version\")\n result_data = {}\n\n succ, resp = get_ha_rds_backend_instance_info(payload)\n if not succ:\n return resp\n rds_2_instance = {rds: instance for instance, rds in resp.items()}\n\n if len(sg_ids) > 1:\n return console_response(\n SecurityErrorCode.ONE_SECURITY_PER_INSTANCE_ERROR, \"modify failed\")\n sg_id = sg_ids[0]\n\n code = 0\n msg = 'Success'\n for rds_id in rds_ids:\n sg_results_succ = []\n sg = RdsSecurityGroupModel.get_security_by_id(sg_id=sg_id)\n sg_uuid = sg.uuid\n visible_rds_record = RdsModel.get_rds_by_id(rds_id=rds_id)\n if visible_rds_record.rds_type == 'ha':\n rds_group = visible_rds_record.rds_group\n rds_records = RdsModel.get_rds_records_by_group(rds_group)\n else:\n rds_records = []\n for rds_record in rds_records:\n rds_ins_uuid = rds_2_instance.get(rds_record.uuid)\n\n payload.update(\n {\"action\": check_instance_security_action, \"version\": version,\n \"server\": rds_ins_uuid})\n # check_resp = api.get(payload=payload, timeout=10)\n check_resp = api.get(payload=payload)\n if check_resp.get(\"code\") != 0:\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = check_resp.get(\"msg\")\n continue\n\n # if the instance already has a security group, remove it\n if check_resp[\"data\"][\"total_count\"] > 0:\n old_sg_uuid = check_resp[\"data\"][\"ret_set\"][0][\"id\"]\n payload.update({\"action\": remove_action, \"version\": version,\n \"server\": rds_ins_uuid,\n \"security_group\": old_sg_uuid})\n # remove_resp = 
api.get(payload=payload, timeout=10)\n remove_resp = api.get(payload=payload)\n if remove_resp.get(\"code\") != 0:\n logger.debug(\"the resp of removing the old securty group is:\" +\n str(remove_resp))\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = remove_resp.get(\"msg\")\n continue\n else:\n rds_record.sg = None\n rds_record.save()\n\n # grant the new security group to the instance\n payload.update({\"action\": apply_action, \"version\": version,\n \"server\": rds_ins_uuid, \"security_group\": sg_uuid})\n # grant_resp = api.get(payload=payload, timeout=10)\n grant_resp = api.get(payload=payload)\n\n if grant_resp.get(\"code\") != 0:\n logger.debug(\"the resp of granting the new securty group is:\" +\n str(grant_resp))\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = grant_resp.get(\"msg\")\n logger.error(\n \"security_group with sg_id \" + sg_id +\n \" cannot apply to rds with rds_id \" + rds_id)\n else:\n try:\n rds_record.sg = RdsSecurityGroupModel.\\\n get_security_by_id(sg_id)\n rds_record.save()\n except Exception as exp:\n logger.error(\"cannot save grant sg to rds to db, {}\".\n format(exp.message))\n else:\n sg_results_succ.append(sg_id)\n result_data.update({rds_id: sg_results_succ})\n resp = console_response(code, msg, len(result_data.keys()), [result_data])\n return resp", "def test_list_cluster_resource_quota(self):\n pass", "def test_instance_too_small_aws():\n args = argparse.Namespace(cfg=os.path.join(TEST_DATA_DIR, 'instance-too-small-aws.ini'))\n with pytest.raises(UserReportError) as err:\n cfg = ElasticBlastConfig(configure(args), task = ElbCommand.SUBMIT)\n cfg.validate()\n assert err.value.returncode == INPUT_ERROR\n print(err.value.message)\n assert 'does not have enough memory' in err.value.message", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n availability_zone: Optional[pulumi.Input[str]] = None,\n create_sample_data: Optional[pulumi.Input[bool]] = None,\n db_instance_category: Optional[pulumi.Input[str]] = None,\n db_instance_class: Optional[pulumi.Input[str]] = None,\n db_instance_mode: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n encryption_key: Optional[pulumi.Input[str]] = None,\n encryption_type: Optional[pulumi.Input[str]] = None,\n engine: Optional[pulumi.Input[str]] = None,\n engine_version: Optional[pulumi.Input[str]] = None,\n instance_charge_type: Optional[pulumi.Input[str]] = None,\n instance_group_count: Optional[pulumi.Input[int]] = None,\n instance_network_type: Optional[pulumi.Input[str]] = None,\n instance_spec: Optional[pulumi.Input[str]] = None,\n ip_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InstanceIpWhitelistArgs']]]]] = None,\n maintain_end_time: Optional[pulumi.Input[str]] = None,\n maintain_start_time: Optional[pulumi.Input[str]] = None,\n master_node_num: Optional[pulumi.Input[int]] = None,\n payment_type: Optional[pulumi.Input[str]] = None,\n period: Optional[pulumi.Input[str]] = None,\n private_ip_address: Optional[pulumi.Input[str]] = None,\n resource_group_id: Optional[pulumi.Input[str]] = None,\n security_ip_lists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n seg_node_num: Optional[pulumi.Input[int]] = None,\n seg_storage_type: Optional[pulumi.Input[str]] = None,\n ssl_enabled: Optional[pulumi.Input[int]] = None,\n storage_size: Optional[pulumi.Input[int]] = None,\n tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n used_time: Optional[pulumi.Input[str]] = None,\n 
vector_configuration_status: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n vswitch_id: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def reserved_compare(options):\n running_instances = defaultdict(dict)\n reserved_purchases = defaultdict(dict)\n regions = boto.ec2.regions()\n good_regions = [r for r in regions if r.name not in ['us-gov-west-1',\n 'cn-north-1']]\n for region in good_regions:\n if options.trace:\n print \" Scanning region {0}\".format(region.name)\n conn = region.connect()\n filters = {'instance-state-name': 'running'}\n zones = defaultdict(dict)\n\n if options.trace:\n print \" Fetching running instances\"\n reservations = conn.get_all_instances(filters=filters)\n for reservation in reservations:\n for inst in reservation.instances:\n if options.debug:\n print instance_string(inst, options, verbose=True)\n if inst.state != 'running':\n if options.debug:\n print \"Skip {0.id} state {0.state}\".format(inst)\n continue\n if inst.spot_instance_request_id:\n if options.debug:\n print \"Skip {0.id} has spot id {0.spot_instance_request_id}\".format(inst)\n continue\n if 'aws:autoscaling:groupName' in inst.tags:\n if options.debug:\n print \"Skip {0.id} is an autoscale instance\".format(inst)\n continue\n if inst.platform == 'Windows' or inst.platform == 'windows':\n if options.debug:\n print \"Skip {0.id} has platform {0.platform}\".format(inst)\n continue\n if inst.instance_type not in zones[inst.placement]:\n zones[inst.placement][inst.instance_type] = []\n zones[inst.placement][inst.instance_type].append(inst)\n\n if zones:\n running_instances[region.name] = zones\n\n purchased = defaultdict(dict)\n if options.trace:\n print \" Fetching reservations\"\n\n reserved = conn.get_all_reserved_instances()\n for r in reserved:\n if options.debug:\n print reservation_string(r, verbose=True)\n if r.state != 'active':\n continue\n if r.instance_tenancy != 'default':\n print 'WARNING: Non-default tenancy %s: %s' % (r.instance_tenancy, reservation_string(r))\n continue\n if r.instance_type not in purchased[r.availability_zone]:\n purchased[r.availability_zone][r.instance_type] = [r]\n else:\n purchased[r.availability_zone][r.instance_type].append(r)\n\n if purchased:\n reserved_purchases[region.name] = purchased\n\n return check_reservation_use(options, running_instances,\n reserved_purchases)", "def check_available():\n\n rm = current_app.config['rm_object']\n\n return rm.check_availability()", "def xtest_instance_api_negative(self):\n\n # Test creating a db instance.\n LOG.info(\"* Creating db instance\")\n body = r\"\"\"\n {\"instance\": {\n \"name\": \"dbapi_test\",\n \"flavorRef\": \"medium\",\n \"port\": \"3306\",\n \"dbtype\": {\n \"name\": \"mysql\",\n \"version\": \"5.5\"\n }\n }\n }\"\"\"\n\n req = httplib2.Http(\".cache\")\n resp, content = req.request(API_URL + \"instances\", \"POST\", body, AUTH_HEADER)\n LOG.debug(content)\n content = json.loads(content)\n LOG.debug(resp)\n LOG.debug(content)\n\n self.instance_id = content['instance']['id']\n LOG.debug(\"Instance ID: %s\" % self.instance_id)\n\n # Assert 1) that the request was accepted and 2) that the response\n # is in the expected format.\n self.assertEqual(201, resp.status)\n self.assertTrue(content.has_key('instance'))\n\n\n # Test creating an instance without a body in the request.\n LOG.info(\"* Creating an instance without a body\")\n resp, content = req.request(API_URL + \"instances\", \"POST\", \"\", 
AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n\n # Test creating an instance with a malformed body.\n LOG.info(\"* Creating an instance with a malformed body\")\n resp, content = req.request(API_URL + \"instances\", \"POST\", r\"\"\"{\"instance\": {}}\"\"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(500, resp.status)\n \n # Test listing all db instances with a body in the request.\n LOG.info(\"* Listing all db instances with a body\")\n resp, content = req.request(API_URL + \"instances\", \"GET\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n \n # Test getting a specific db instance with a body in the request.\n LOG.info(\"* Getting instance %s with a body in the request\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id, \"GET\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n\n # Test getting a non-existent db instance.\n LOG.info(\"* Getting dummy instance\")\n resp, content = req.request(API_URL + \"instances/dummy\", \"GET\", \"\", AUTH_HEADER)\n content = json.loads(content)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n\n # Test immediately resetting the password on a db instance with a body in the request.\n LOG.info(\"* Resetting password on instance %s with a body in the request\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id + \"/resetpassword\", \"POST\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n \n\n # Test resetting the password on a db instance for a non-existent instance\n LOG.info(\"* Resetting password on dummy instance\")\n resp, content = req.request(API_URL + \"instances/dummy/resetpassword\", \"POST\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n \n # Test restarting a db instance for a non-existent instance\n LOG.info(\"* Restarting dummy instance\")\n resp, content = req.request(API_URL + \"instances/dummy/restart\", \"POST\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n\n # Test immediately restarting a db instance with a body in the request.\n LOG.info(\"* Restarting instance %s\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id + \"/restart\", \"POST\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n\n # Test deleting an instance with a body in the request.\n LOG.info(\"* Testing delete of instance %s with a body in the request\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id, \"DELETE\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n # Test that trying to delete an already deleted instance returns\n # the proper error code.\n LOG.info(\"* Testing re-delete of instance %s\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id, \"DELETE\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)", "def create(self):\n raise WufooException(\"InstanceResource creation not supported\")", "def reconfigure_nova_quota(self):\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"basic_env_for_reconfiguration\")\n\n cluster_id = 
self.fuel_web.get_last_created_cluster()\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n\n self.show_step(2)\n config = utils.get_config_template('nova_quota')\n structured_config = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n cluster_id,\n role=\"controller\")\n\n self.show_step(3)\n uptimes = self.get_service_uptime(controllers, 'nova-api')\n\n self.show_step(4)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role=\"controller\")\n\n self.show_step(5)\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(6)\n self.check_service_was_restarted(controllers, uptimes, 'nova-api')\n\n self.show_step(7)\n self.check_config_on_remote(controllers, structured_config)\n\n self.show_step(8)\n self.show_step(9)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n\n self.check_nova_quota(os_conn, cluster_id)\n\n self.env.make_snapshot(\"reconfigure_nova_quota\")", "def create_cluster(rs):\n\n rs.create_cluster(verbose=False)\n print('Creating cluster. Will check every 30 seconds for completed creation.')\n cluster_built = False\n while not cluster_built:\n print('Sleeping 30 seconds.')\n time.sleep(30)\n cluster_built = check_available(rs)", "def check_availability(self):\n pass", "def replace_with_insufficient_replicas_test(self):\n debug(\"Starting cluster with 3 nodes.\")\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n if DISABLE_VNODES:\n num_tokens = 1\n else:\n # a little hacky but grep_log returns the whole line...\n num_tokens = int(node3.get_conf_option('num_tokens'))\n\n debug(\"testing with num_tokens: {}\".format(num_tokens))\n\n debug(\"Inserting Data...\")\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=2)'])\n\n # stop node to replace\n debug(\"Stopping node to replace.\")\n node3.stop(wait_other_notice=True)\n\n # stop other replica\n debug(\"Stopping node2 (other replica)\")\n node2.stop(wait_other_notice=True)\n\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n cluster.add(node4, False)\n node4.start(replace_address='127.0.0.3', wait_for_binary_proto=False, wait_other_notice=False)\n\n # replace should fail due to insufficient replicas\n node4.watch_log_for(\"Unable to find sufficient sources for streaming range\")\n assert_not_running(node4)", "def check_elb_instance_health(elb_name, instances):\n if_verbose(\"Checking ELB %s instance health for %s\" % (elb_name, instances))\n timer = time.time()\n while (True):\n if_verbose(\"Sleeping for %d ELB instance health\" % args.update_timeout)\n time.sleep(args.update_timeout)\n\n if int(time.time() - timer) >= args.health_check_timeout:\n return \"Health check timer expired. 
A manual clean up is likely.\"\n\n healthy_elb_instances = 0\n elb_instances = elb.describe_instance_health(LoadBalancerName=elb_name, Instances=instances)\n for instance in elb_instances[\"InstanceStates\"]:\n if_verbose(\"Progress of ELB instance %s: %s\" % (instance[\"InstanceId\"], instance[\"State\"]))\n\n if instance[\"State\"] == \"InService\":\n healthy_elb_instances += 1\n\n if healthy_elb_instances == len(instances):\n break\n else:\n healthy_elb_instances = 0\n\n if_verbose(\"ELB %s is healthy with instances %s\" % (elb_name, elb_instances))\n return None", "def scale_up_autoscaling_group(asg_name, instance_count):\n if_verbose(\"Scaling up ASG %s to %d instances\" % (asg_name, instance_count))\n asg.set_desired_capacity(AutoScalingGroupName=asg_name, DesiredCapacity=instance_count)\n \n activities = []\n timer = time.time()\n while(True):\n if_verbose(\"Sleeping for %d seconds whilst waiting for activities to come active\" % args.update_timeout)\n time.sleep(args.update_timeout)\n\n if int(time.time() - timer) >= args.health_check_timeout:\n return \"Health check timer expired on activities listing. A manual clean up is likely.\"\n\n activities = asg.describe_scaling_activities(AutoScalingGroupName=asg_name, MaxRecords=args.instance_count_step) \n \n if len(activities[\"Activities\"]) == args.instance_count_step:\n break\n\n activity_ids = [a[\"ActivityId\"] for a in activities[\"Activities\"]]\n\n if not len(activity_ids) > 0:\n return \"No activities found\" \n \n if_verbose(\"Activities found, checking them until complete or %ds timer expires\" % args.health_check_timeout)\n timer = time.time()\n while(True):\n if_verbose(\"Sleeping for %d seconds whilst waiting for activities to complete\" % args.update_timeout)\n time.sleep(args.update_timeout)\n \n if int(time.time() - timer) >= args.health_check_timeout:\n return \"Health check timer expired on activities check. 
A manual clean up is likely.\"\n\n completed_activities = 0\n activity_statuses = asg.describe_scaling_activities(ActivityIds=activity_ids, AutoScalingGroupName=asg_name, MaxRecords=args.instance_count_step)\n for activity in activity_statuses[\"Activities\"]:\n if_verbose(\"Progress of activity ID %s: %d\" % (activity[\"ActivityId\"], activity[\"Progress\"]))\n\n if activity[\"Progress\"] == 100:\n completed_activities += 1\n\n if completed_activities >= args.instance_count_step:\n break\n else:\n completed_activities = 0\n\n if_verbose(\"Scaling up of ASG %s successful\" % asg_name)\n return None", "def wait_for_instances(client, asg, desired_state=None, desired_health=None,\n desired_count=None):\n for i in range(61):\n if i == 60:\n raise Exception('Tried for 5 minutes, giving up.')\n sleep(10)\n _asg = client.describe_auto_scaling_groups(\n AutoScalingGroupNames=[asg['AutoScalingGroupName']],\n )['AutoScalingGroups'][0]\n\n if(\n desired_count is not None and\n len(_asg['Instances']) < desired_count\n ):\n continue\n\n # Check instance states\n all_matching = True\n for instance in _asg['Instances']:\n if(\n desired_state is not None and\n instance['LifecycleState'] != desired_state\n ):\n all_matching = False\n break\n if(\n desired_health is not None and\n instance['HealthStatus'] != desired_health\n ):\n all_matching = False\n break\n if all_matching:\n break", "def test_create_instance_with_oversubscribed_ram_fail(self):\n self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)\n self.rt.update_available_resource(self.context.elevated(), NODENAME)\n\n # get total memory as reported by virt driver:\n resources = self.compute.driver.get_available_resource(NODENAME)\n total_mem_mb = resources['memory_mb']\n\n oversub_limit_mb = total_mem_mb * 1.5\n instance_mb = int(total_mem_mb * 1.55)\n\n # build an instance, specifying an amount of memory that exceeds\n # both total_mem_mb and the oversubscribed limit:\n params = {\"flavor\": {\"memory_mb\": instance_mb, \"root_gb\": 128,\n \"ephemeral_gb\": 128}}\n instance = self._create_fake_instance_obj(params)\n\n filter_properties = {'limits': {'memory_mb': oversub_limit_mb}}\n\n self.compute.build_and_run_instance(self.context, instance,\n {}, {}, filter_properties, [],\n block_device_mapping=[])", "def is_asg_scaled(asg_name, desired_capacity):\n logger.info('Checking asg {} instance count...'.format(asg_name))\n response = client.describe_auto_scaling_groups(\n AutoScalingGroupNames=[asg_name], MaxRecords=1\n )\n actual_instances = response['AutoScalingGroups'][0]['Instances']\n if len(actual_instances) != desired_capacity:\n logger.info('Asg {} does not have enough running instances to proceed'.format(asg_name))\n logger.info('Actual instances: {} Desired instances: {}'.format(\n len(actual_instances),\n desired_capacity)\n )\n is_scaled = False\n else:\n logger.info('Asg {} scaled OK'.format(asg_name))\n logger.info('Actual instances: {} Desired instances: {}'.format(\n len(actual_instances),\n desired_capacity)\n )\n is_scaled = True\n return is_scaled", "def test_live_migration_src_check_compute_node_not_alive(self):\n instance_id = self._create_instance()\n i_ref = db.instance_get(self.context, instance_id)\n t = utils.utcnow() - datetime.timedelta(10)\n s_ref = self._create_compute_service(created_at=t, updated_at=t,\n host=i_ref['host'])\n\n self.assertRaises(exception.ComputeServiceUnavailable,\n self.scheduler.driver._live_migration_src_check,\n self.context, i_ref)\n\n db.instance_destroy(self.context, instance_id)\n 
db.service_destroy(self.context, s_ref['id'])", "def check_service_replication(\n instance_config,\n all_tasks,\n smartstack_replication_checker,\n):\n expected_count = instance_config.get_instances()\n log.info(\"Expecting %d total tasks for %s\" % (expected_count, instance_config.job_id))\n proxy_port = marathon_tools.get_proxy_port_for_instance(\n name=instance_config.service,\n instance=instance_config.instance,\n cluster=instance_config.cluster,\n soa_dir=instance_config.soa_dir,\n )\n\n registrations = instance_config.get_registrations()\n # if the primary registration does not match the service_instance name then\n # the best we can do is check marathon for replication (for now).\n if proxy_port is not None and registrations[0] == instance_config.job_id:\n check_smartstack_replication_for_instance(\n instance_config=instance_config,\n expected_count=expected_count,\n smartstack_replication_checker=smartstack_replication_checker,\n )\n else:\n check_healthy_marathon_tasks_for_service_instance(\n instance_config=instance_config,\n expected_count=expected_count,\n all_tasks=all_tasks,\n )", "def check_pacemaker_resource(self, resource_name, role, is_ha=True):\n n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n self.cluster_id, [role])\n d_ctrls = self.fuel_web.get_devops_nodes_by_nailgun_nodes(n_ctrls)\n pcm_nodes = ' '.join(self.fuel_web.get_pcm_nodes(\n d_ctrls[0].name, pure=True)['Online'])\n logger.info(\"pacemaker nodes are {0}\".format(pcm_nodes))\n resource_nodes = self.fuel_web.get_pacemaker_resource_location(\n d_ctrls[0].name, \"{}\".format(resource_name))\n if is_ha:\n for resource_node in resource_nodes:\n logger.info(\"Check resource [{0}] on node {1}\".format(\n resource_name, resource_node.name))\n config = self.fuel_web.get_pacemaker_config(resource_node.name)\n asserts.assert_not_equal(\n re.search(\n \"Clone Set: clone_{0} \\[{0}\\]\\s+Started: \\[ {1} \\]\".\n format(resource_name, pcm_nodes), config), None,\n 'Resource [{0}] is not properly configured'.format(\n resource_name))\n else:\n asserts.assert_true(len(resource_nodes), 1)\n config = self.fuel_web.get_pacemaker_config(resource_nodes[0].name)\n logger.info(\"Check resource [{0}] on node {1}\".format(\n resource_name, resource_nodes[0].name))\n asserts.assert_not_equal(\n re.search(\"{0}\\s+\\(ocf::fuel:{1}\\):\\s+Started\".format(\n resource_name, resource_name.split(\"_\")[1]), config), None,\n 'Resource [{0}] is not properly configured'.format(\n resource_name))", "def health_ok(self):\n for client in self.clients():\n if client.run_cmd('ls'):\n log.info('Vmware cluster is up.')\n return True\n else:\n return False", "def _estimate_elasticsearch_requirement(\n instance: Instance,\n desires: CapacityDesires,\n working_set: float,\n reads_per_second: float,\n max_rps_to_disk: int,\n zones_per_region: int = 3,\n copies_per_region: int = 3,\n) -> CapacityRequirement:\n # Keep half of the cores free for background work (merging mostly)\n needed_cores = math.ceil(sqrt_staffed_cores(desires) * 1.5)\n # Keep half of the bandwidth available for backup\n needed_network_mbps = simple_network_mbps(desires) * 2\n\n needed_disk = math.ceil(\n (1.0 / desires.data_shape.estimated_compression_ratio.mid)\n * desires.data_shape.estimated_state_size_gib.mid\n * copies_per_region,\n )\n\n # Rough estimate of how many instances we would need just for the the CPU\n # Note that this is a lower bound, we might end up with more.\n needed_cores = math.ceil(\n max(1, needed_cores // (instance.cpu_ghz / 
desires.core_reference_ghz))\n )\n rough_count = math.ceil(needed_cores / instance.cpu)\n\n # Generally speaking we want fewer than some number of reads per second\n # hitting disk per instance. If we don't have many reads we don't need to\n # hold much data in memory.\n instance_rps = max(1, reads_per_second // rough_count)\n disk_rps = instance_rps * _es_io_per_read(max(1, needed_disk // rough_count))\n rps_working_set = min(1.0, disk_rps / max_rps_to_disk)\n\n # If disk RPS will be smaller than our target because there are no\n # reads, we don't need to hold as much data in memory\n needed_memory = min(working_set, rps_working_set) * needed_disk\n\n # Now convert to per zone\n needed_cores = needed_cores // zones_per_region\n needed_disk = needed_disk // zones_per_region\n needed_memory = int(needed_memory // zones_per_region)\n logger.debug(\n \"Need (cpu, mem, disk, working) = (%s, %s, %s, %f)\",\n needed_cores,\n needed_memory,\n needed_disk,\n working_set,\n )\n\n return CapacityRequirement(\n requirement_type=\"elasticsearch-data-zonal\",\n core_reference_ghz=desires.core_reference_ghz,\n cpu_cores=certain_int(needed_cores),\n mem_gib=certain_float(needed_memory),\n disk_gib=certain_float(needed_disk),\n network_mbps=certain_float(needed_network_mbps),\n context={\n \"working_set\": min(working_set, rps_working_set),\n \"rps_working_set\": rps_working_set,\n \"disk_slo_working_set\": working_set,\n \"replication_factor\": copies_per_region,\n \"compression_ratio\": round(\n 1.0 / desires.data_shape.estimated_compression_ratio.mid, 2\n ),\n \"read_per_second\": reads_per_second,\n },\n )", "def test_04_verify_upgraded_ipv6_network_redundant(self):\n\n self.createIpv4NetworkOffering(True)\n self.createIpv6NetworkOfferingForUpdate(True)\n self.createTinyServiceOffering()\n self.prepareRoutingTestResourcesInBackground()\n self.deployNetwork()\n self.deployNetworkVm()\n self.updateNetworkWithOffering()\n self.checkIpv6Network()\n self.checkIpv6NetworkVm()\n self.restartNetworkWithCleanup()\n self.checkIpv6Network()\n self.checkIpv6NetworkRouting()\n self.checkIpv6FirewallRule()\n self.checkNetworkVRRedundancy()", "def waitForInstanceToRun(instance):\n while True:\n try:\n instance.update()\n break\n except EC2ResponseError:\n continue\n\n for trial in range(0, NUM_RETRY_ATTEMPTS):\n if instance.update() == u'running':\n break\n elif trial == NUM_RETRY_ATTEMPTS-1:\n raise RuntimeError(\"AWS instance failed to startup after %d \" \\\n \"re-checks\" % NUM_RETRY_ATTEMPTS)\n else:\n time.sleep(1)", "def test_upgrade_plan_all_fine(setup, skuba):\n\n setup_kubernetes_version(skuba)\n out = skuba.cluster_upgrade_plan()\n\n assert out.find(\n \"Congratulations! 
You are already at the latest version available\"\n ) != -1", "def __run_instances(self, number=1, policies={}):\n try:\n self.euca = Euca2ool('k:n:t:g:d:f:z:',\n ['key=', 'kernel=', 'ramdisk=', 'instance-count=', 'instance-type=',\n 'group=', 'user-data=', 'user-data-file=', 'addressing=', 'availability-zone='])\n except Exception, ex:\n sys.exit(1)\n\n instance_type = policies.get('instance_type') or 'm1.small'\n image_id = policies.get('image_id') or self.__get_image_id()[0]\n min_count = number\n max_count = number\n keyname = 'mykey'\n \n kernel_id = None\n ramdisk_id = None\n group_names = []\n user_data = None\n addressing_type = None\n zone = None\n user_data = None\n \n if image_id:\n euca_conn = self.__make_connection()\n try:\n reservation = euca_conn.run_instances(image_id = image_id,\n min_count = min_count,\n max_count = max_count,\n key_name = keyname,\n security_groups = group_names,\n user_data = user_data,\n addressing_type = addressing_type,\n instance_type = instance_type,\n placement = zone,\n kernel_id = kernel_id,\n ramdisk_id = ramdisk_id)\n except Exception, ex:\n self.euca.display_error_and_exit('error:%s' % ex)\n return reservation\n return False", "def test_least_busy_host_gets_instance(self):\n s_ref = self._create_compute_service(host='host1')\n s_ref2 = self._create_compute_service(host='host2')\n instance_id1 = self._create_instance(host='host1')\n\n instance_id2 = self._create_instance()\n host = self.scheduler.driver.schedule_run_instance(self.context,\n instance_id2)\n self.assertEqual(host, 'host2')\n db.instance_destroy(self.context, instance_id2)\n db.instance_destroy(self.context, instance_id1)\n db.service_destroy(self.context, s_ref['id'])\n db.service_destroy(self.context, s_ref2['id'])", "def run_instance():\n ami_id = \"ami-04876f29fd3a5e8ba\" # AMI Id\n instance_type = \"t2.micro\" # Instance Type\n tag_specs = [\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': 'Name',\n 'Value': 'BoB10@ProductDev.sdk.8084'\n }\n ]\n }\n ]\n\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Run an instance\n\n instances = ec2_resource.create_instances(ImageId=ami_id, InstanceType=instance_type,\n MaxCount=1, MinCount=1, KeyName='BoB10@ProductDev.8084',\n TagSpecifications=tag_specs, \n SecurityGroupIds=['sg-06b757b4bb272d98f'],\n UserData=assemble_userdata().as_string())\n Instance_id = instances[0].id\n print('\\nInstance Id: ' + Instance_id)\n print('Image Id: ' + instances[0].image_id)\n print('Instance Type: ' + instances[0].instance_type)\n print('State: ' + instances[0].state['Name'])\n return Instance_id", "def test_instance_running(self) -> None:\n if self.prod_env:\n ec2_name = 'saints-xctf-server-prod-asg'\n else:\n ec2_name = 'saints-xctf-server-dev-asg'\n\n instances = self.get_ec2(ec2_name)\n self.assertTrue(len(instances) > 0)", "def tcp_ping_nodes(self, timeout=20.0):\n for node in self.all_instances:\n if node.instance_type in [\n InstanceType.RESILIENT_SINGLE,\n InstanceType.SINGLE,\n InstanceType.DBSERVER,\n ]:\n node.check_version_request(timeout)", "def step_impl(context, instance_number):\n num_try = 60\n interval = 10\n success = False\n\n for i in range(num_try):\n time.sleep(interval)\n context.service_instances = context.service.status()['replicaStatus']\n if len(context.service_instances) == int(instance_number):\n success = True\n break\n context.dl.logger.debug(\"Step is running for {:.2f}[s] and now Going to sleep {:.2f}[s]\".format((i + 1) * interval,\n 
interval))\n\n assert success, \"TEST FAILED: Expected {}, Got {}\".format(instance_number, len(context.service_instances))", "def check_vms(st):\n\n logging.info(\"Checking batch system's VMs...\")\n check_time = time.time()\n\n # Retrieve *all* running instances (also the non-owned ones) and filter out\n # statuses of workers which are not valid VMs: we are not interested in them\n rvms = running_instances()\n rvms2 = []\n\n rips = []\n if rvms is not None:\n for inst in rvms:\n ipv4 = inst.network_ip(network_name=cf[\"api\"][\"network_name\"])\n if ipv4 is not None:\n rips.append(ipv4)\n rvms2.append(inst)\n if len(rips) == 0:\n rips = None\n new_workers_status = BatchPlugin.poll_status( st['workers_status'], rips )\n\n rvms=rvms2\n\n if new_workers_status is not None:\n #logging.debug(new_workers_status)\n st['workers_status'] = new_workers_status\n new_workers_status = None\n\n hosts_shutdown = []\n for host,info in st['workers_status'].iteritems():\n if info['jobs'] != 0: continue\n if (check_time-info['unchangedsince']) > cf['elastiq']['idle_for_time_s']:\n logging.info(\"Host %s is idle for more than %ds: requesting shutdown\" % \\\n (host,cf['elastiq']['idle_for_time_s']))\n st['workers_status'][host]['unchangedsince'] = check_time # reset timer\n hosts_shutdown.append(host)\n\n if len(hosts_shutdown) > 0:\n inst_ok = scale_down(hosts_shutdown, valid_hostnames=st['workers_status'].keys())\n change_vms_allegedly_running(st, -len(inst_ok))\n\n # Scale up to reach the minimum quota, if any\n min_vms = cf['quota']['min_vms']\n if min_vms >= 1:\n rvms = running_instances(st['workers_status'].keys())\n if rvms is None:\n logging.warning(\"Cannot get list of running instances for honoring min quota of %d\" % min_vms)\n else:\n n_run = len(rvms)\n n_consider_run = n_run + st['vms_allegedly_running']\n logging.info(\"VMs: running=%d | allegedly running=%d | considering=%d\" % \\\n (n_run, st['vms_allegedly_running'], n_consider_run))\n n_vms = min_vms-n_consider_run\n if n_vms > 0:\n logging.info(\"Below minimum quota (%d VMs): requesting %d more VMs\" % \\\n (min_vms,n_vms))\n inst_ok = scale_up(n_vms, valid_hostnames=st['workers_status'].keys(), vms_allegedly_running=st['vms_allegedly_running'] )\n for inst in inst_ok:\n change_vms_allegedly_running(st, 1, inst)\n st['event_queue'].append({\n 'action': 'check_owned_instance',\n 'when': time.time() + cf['elastiq']['estimated_vm_deploy_time_s'],\n 'params': [ inst ]\n })\n\n # OK: schedule when configured\n sched_when = time.time() + cf['elastiq']['check_vms_every_s']\n\n else:\n # Not OK: reschedule ASAP\n sched_when = 0\n\n return {\n 'action': 'check_vms',\n 'when': sched_when\n }", "def check_yarn_service(master, ec2_opts, num_nodes):\n output = spark_ec2.ssh_read(master, ec2_opts, \"/root/ephemeral-hdfs/bin/yarn node -list -all |grep RUNNING |wc -l\")\n # Ok if one slave is down\n return int(output) >= int(num_nodes) - 1", "def test_update_hyperflex_cluster(self):\n pass", "def create_instance(name, config, region, secrets, key_name, instance_data,\n deploypass):\n conn = connect_to_region(\n region,\n aws_access_key_id=secrets['aws_access_key_id'],\n aws_secret_access_key=secrets['aws_secret_access_key']\n )\n vpc = VPCConnection(\n region=conn.region,\n aws_access_key_id=secrets['aws_access_key_id'],\n aws_secret_access_key=secrets['aws_secret_access_key'])\n\n # Make sure we don't request the same things twice\n token = str(uuid.uuid4())[:16]\n\n instance_data = instance_data.copy()\n instance_data['name'] = name\n 
instance_data['hostname'] = '{name}.{domain}'.format(\n name=name, domain=config['domain'])\n\n bdm = None\n if 'device_map' in config:\n bdm = BlockDeviceMapping()\n for device, device_info in config['device_map'].items():\n bdm[device] = BlockDeviceType(size=device_info['size'],\n delete_on_termination=True)\n\n ip_address = get_ip(instance_data['hostname'])\n subnet_id = None\n\n if ip_address:\n s_id = get_subnet_id(vpc, ip_address)\n if s_id in config['subnet_ids']:\n if ip_available(conn, ip_address):\n subnet_id = s_id\n else:\n log.warning(\"%s already assigned\" % ip_address)\n\n # TODO: fail if no IP assigned\n if not ip_address or not subnet_id:\n ip_address = None\n subnet_id = choice(config.get('subnet_ids'))\n\n while True:\n try:\n reservation = conn.run_instances(\n image_id=config['ami'],\n key_name=key_name,\n instance_type=config['instance_type'],\n block_device_map=bdm,\n client_token=token,\n subnet_id=subnet_id,\n private_ip_address=ip_address,\n disable_api_termination=bool(config.get('disable_api_termination')),\n security_group_ids=config.get('security_group_ids', []),\n )\n break\n except boto.exception.BotoServerError:\n log.exception(\"Cannot start an instance\")\n time.sleep(10)\n\n instance = reservation.instances[0]\n log.info(\"instance %s created, waiting to come up\", instance)\n # Wait for the instance to come up\n while True:\n try:\n instance.update()\n if instance.state == 'running':\n break\n except Exception:\n log.exception(\"hit error waiting for instance to come up\")\n time.sleep(10)\n\n instance.add_tag('Name', name)\n instance.add_tag('FQDN', instance_data['hostname'])\n instance.add_tag('created', time.strftime(\"%Y-%m-%d %H:%M:%S %Z\",\n time.gmtime()))\n instance.add_tag('moz-type', config['type'])\n\n log.info(\"assimilating %s\", instance)\n instance.add_tag('moz-state', 'pending')\n while True:\n try:\n assimilate(instance.private_ip_address, config, instance_data, deploypass)\n break\n except:\n log.exception(\"problem assimilating %s\", instance)\n time.sleep(10)\n instance.add_tag('moz-state', 'ready')", "def update_pvserver_instances(instances):\n status = {True: \"available\", False: \"in-use\"}\n for k, v in instances.items():\n is_available = is_pvserver_available(v[\"name\"], v[\"port\"])\n v[\"status\"] = status[is_available]", "def instance_outdated_age(instance_id, days_fresh):\n\n response = ec2_client.describe_instances(\n InstanceIds=[\n instance_id,\n ]\n )\n\n instance_launch_time = response['Reservations'][0]['Instances'][0]['LaunchTime']\n\n # gets the age of a node by days only:\n instance_age = ((datetime.datetime.now(instance_launch_time.tzinfo) - instance_launch_time).days)\n\n # gets the remaining age of a node in seconds (e.g. 
if node is y days and x seconds old this will only retrieve the x seconds):\n instance_age_remainder = ((datetime.datetime.now(instance_launch_time.tzinfo) - instance_launch_time).seconds)\n\n if instance_age > days_fresh:\n logger.info(\"Instance id {} launch age of '{}' day(s) is older than expected '{}' day(s)\".format(instance_id, instance_age, days_fresh))\n return True\n elif (instance_age == days_fresh) and (instance_age_remainder > 0):\n logger.info(\"Instance id {} is older than expected '{}' day(s) by {} seconds.\".format(instance_id, days_fresh, instance_age_remainder))\n return True\n else:\n logger.info(\"Instance id {} : OK \".format(instance_id))\n return False", "def update_available_resource(self, context):\n # ask hypervisor for its view of resource availability &\n # usage:\n resources = self.driver.get_available_resource()\n if not resources:\n # The virt driver does not support this function\n LOG.warn(_(\"Virt driver does not support \"\n \"'get_available_resource' Compute tracking is disabled.\"))\n self.compute_node = None\n self.claims = {}\n return\n\n # Confirm resources dictionary contains expected keys:\n self._verify_resources(resources)\n\n resources['free_ram_mb'] = resources['memory_mb'] - \\\n resources['memory_mb_used']\n resources['free_disk_gb'] = resources['local_gb'] - \\\n resources['local_gb_used']\n\n LOG.audit(_(\"free_ram_mb: %s\") % resources['free_ram_mb'])\n LOG.audit(_(\"free_disk_gb: %s\") % resources['free_disk_gb'])\n # Apply resource claims representing in-progress operations to\n # 'resources'. This may over-estimate the amount of resources in use,\n # at least until the next time 'update_available_resource' runs.\n self._apply_claims(resources)\n\n # also generate all load stats:\n values = self._create_load_stats(context)\n resources.update(values)\n\n if not self.compute_node:\n # we need a copy of the ComputeNode record:\n service = self._get_service(context)\n if not service:\n # no service record, disable resource\n return\n\n compute_node_ref = service['compute_node']\n if compute_node_ref:\n self.compute_node = compute_node_ref[0]\n\n if not self.compute_node:\n # Need to create the ComputeNode record:\n resources['service_id'] = service['id']\n self.compute_node = self._create(context, resources)\n LOG.info(_('Compute_service record created for %s ') % self.host)\n\n else:\n # just update the record:\n self.compute_node = self._update(context, resources,\n prune_stats=True)\n LOG.info(_('Compute_service record updated for %s ') % self.host)", "def check_exactly_one_current_version(self):\n expected_state = \"CURRENT\"\n\n query = \"SELECT COUNT(*) FROM cluster_version;\"\n self.cursor.execute(query)\n result = self.cursor.fetchone()\n if result is None or len(result) != 1:\n Logger.error(\"Unable to run query: {0}\".format(query))\n return\n\n count = result[0]\n if count == 0:\n msg = \"There are no cluster_versions. 
Start ambari-server, and then perform a Restart on one of the services.\\n\" + \\\n \"Then navigate to the \\\"Stacks and Versions > Versions\\\" page and ensure you can see the stack version.\\n\" + \\\n \"Next, restart all services, one-by-one, so that Ambari knows what version each component is running.\"\n Logger.warning(msg)\n elif count == 1:\n query = \"SELECT rv.repo_version_id, rv.version, cv.state FROM cluster_version cv JOIN repo_version rv ON cv.repo_version_id = rv.repo_version_id;\"\n self.cursor.execute(query)\n result = self.cursor.fetchone()\n\n repo_version_id = None\n repo_version = None\n cluster_version_state = None\n\n if result and len(result) == 3:\n repo_version_id = result[0]\n repo_version = result[1]\n cluster_version_state = result[2]\n\n if repo_version_id and repo_version and cluster_version_state:\n if cluster_version_state.upper() == expected_state:\n self.check_all_hosts(repo_version_id, repo_version)\n Logger.info(\"******************************************************************************************************************************************************\")\n Logger.info(\"\\t\\t\\t\\t\\t\\t\\tHDP STACK OVERVIEW\")\n\t Logger.info(\"******************************************************************************************************************************************************\")\n print (\"\\n\")\n Logger.info(\"Cluster HDP Version\\t{0}\".format(repo_version))\n Logger.info(\"Cluster State\\t{0}\".format(cluster_version_state))\n Logger.info(\"Ambari version\\t:{0}\".format(self.ambari_version))\n\n if self.ambari_server_user != \"root\" :\n Logger.info(\"Ambari Server as non-root?\\tYes\")\n else :\n Logger.info(\"Ambari Server as non-root?\\tNo\")\n\n # Read ambari-agent.ini file\n if os.path.exists(AMBARI_AGENT_INI):\n self.ambari_agent_props = self.read_conf_file(AMBARI_AGENT_INI)\n Logger.debug(\"Reading file {0}.\".format(self.ambari_agent_props))\n if \"run_as_user\" in self.ambari_agent_props:\n self.run_as_user = self.ambari_agent_props[\"run_as_user\"]\n if self.run_as_user != \"root\":\n Logger.info(\"Ambari Agent as non-root?\\tYes\")\n else:\n Logger.info(\"Ambari Agent as non-root?\\tNo\")\n else:\n Logger.error(\"Unable to read ambari-agent.ini file\")\n\n else:\n Logger.error(\"Cluster Version {0} should have a state of {1} but is {2}. Make sure to restart all of the Services.\".format(repo_version, expected_state, cluster_version_state))\n else:\n Logger.error(\"Unable to run query: {0}\".format(query))\n elif count > 1:\n # Ensure at least one Cluster Version is CURRENT\n Logger.info(\"Found multiple Cluster versions, checking that exactly one is {0}.\".format(expected_state))\n query = \"SELECT rv.repo_version_id, rv.version, cv.state FROM cluster_version cv JOIN repo_version rv ON cv.repo_version_id = rv.repo_version_id WHERE cv.state = '{0}';\".format(expected_state)\n self.cursor.execute(query)\n rows = self.cursor.fetchall()\n if rows:\n if len(rows) == 1:\n Logger.info(\"Good news; Cluster Version {0} has a state of {1}.\".format(rows[0][1], expected_state))\n self.check_all_hosts_current(rows[0][0], rows[0][1])\n elif len(rows) > 1:\n # Take the repo_version's version column\n repo_versions = [row[1] for row in rows if len(row) == 3]\n Logger.error(\"Found multiple cluster versions with a state of {0}, but only one should be {0}.\\n\" \\\n \"Will need to fix this manually, please contact Support. 
Cluster Versions found: {1}\".format(expected_state, \", \".join(repo_versions)))\n else:\n Logger.error(\"Unable to run query: {0}\\n\".format(query))\n pass", "def provision(args):\n cfg_file = os.path.join(xbow.XBOW_CONFIGDIR, \"settings.yml\")\n\n with open(cfg_file, 'r') as ymlfile:\n cfg = yaml.safe_load(ymlfile)\n\n scheduler = get_by_name(cfg['scheduler_name'])\n if len(scheduler) == 0:\n raise ValueError('Error - cannot find the scheduler')\n elif len(scheduler) > 1:\n raise ValueError('Error - more than one scheduler found')\n workers = get_by_name(cfg['worker_pool_name'])\n if len(workers) == 0:\n print('Warning: no workers found')\n all_nodes = scheduler + workers\n all_cis = [ConnectedInstance(i) for i in all_nodes]\n with open(args.script, 'r') as f:\n for line in f:\n if len(line) > 0 and line[0] == '#':\n print(line[:-1])\n elif len(line) > 0 :\n command = line[:-1]\n if command.split()[0] != 'sudo':\n command = 'sudo ' + command\n print(command + ' : ', end='', flush=True)\n result = exec_all(all_cis, command)\n status = np.all(np.array(result) == 0)\n if status:\n print('OK')\n else:\n print('FAILED')\n for i in range(len(result)):\n if result[i] != 0:\n if i == 0:\n print('Error on scheduler:')\n else:\n print('Error on worker {}'.format(i-1))\n print(all_cis[i].output)\n break\n else:\n status = False\n print(line[:-1], ' : ERROR')\n break\n\n return status", "def test_rebuilt_server_vcpus(self):\n\n remote_client = self.server_behaviors.get_remote_instance_client(\n self.server, self.servers_config)\n server_actual_vcpus = remote_client.get_number_of_cpus()\n self.assertEqual(server_actual_vcpus, self.expected_vcpus)", "def checkDBImportInstance(self, instance):\n\n\t\tsession = self.configDBSession()\n\t\tdbimportInstances = aliased(configSchema.dbimportInstances)\n\n\t\tresult = (session.query(\n\t\t\t\tdbimportInstances.name\n\t\t\t)\n\t\t\t.select_from(dbimportInstances)\n\t\t\t.filter(dbimportInstances.name == instance)\n\t\t\t.count())\n\n\t\tif result == 0:\n\t\t\tlogging.error(\"No DBImport Instance with that name can be found in table 'dbimport_instances'\")\n\t\t\tself.remove_temporary_files()\n\t\t\tsys.exit(1)", "def check_fixed(self: AutoScaler) -> AutoScalerState:\n launched_size = len(self.clients)\n registered_size = Client.count_connected()\n task_count = Task.count_remaining()\n log.debug(f'Autoscale check (clients: {registered_size}/{launched_size}, tasks: {task_count})')\n if launched_size < self.min_size:\n log.debug(f'Autoscale min-size reached ({launched_size} < {self.min_size})')\n return AutoScalerState.SCALE\n if launched_size == 0 and task_count == 0:\n return AutoScalerState.WAIT\n if launched_size == 0 and task_count > 0:\n log.debug(f'Autoscale adding client ({task_count} tasks remaining)')\n return AutoScalerState.SCALE\n else:\n return AutoScalerState.WAIT", "def run_instances(self, image_id, min_count=1, max_count=1,\r\n key_name=None, security_groups=None,\r\n user_data=None, addressing_type=None,\r\n instance_type='m1.small', placement=None,\r\n kernel_id=None, ramdisk_id=None,\r\n monitoring_enabled=False, subnet_id=None,\r\n block_device_map=None,\r\n disable_api_termination=False,\r\n instance_initiated_shutdown_behavior=None,\r\n private_ip_address=None,\r\n placement_group=None, client_token=None,\r\n security_group_ids=None):\r\n params = {'ImageId':image_id,\r\n 'MinCount':min_count,\r\n 'MaxCount': max_count}\r\n if key_name:\r\n params['KeyName'] = key_name\r\n if security_group_ids:\r\n l = []\r\n for group in 
security_group_ids:\r\n if isinstance(group, SecurityGroup):\r\n l.append(group.name)\r\n else:\r\n l.append(group)\r\n self.build_list_params(params, l, 'SecurityGroupId')\r\n if security_groups:\r\n l = []\r\n for group in security_groups:\r\n if isinstance(group, SecurityGroup):\r\n l.append(group.name)\r\n else:\r\n l.append(group)\r\n self.build_list_params(params, l, 'SecurityGroup')\r\n if user_data:\r\n params['UserData'] = base64.b64encode(user_data)\r\n if addressing_type:\r\n params['AddressingType'] = addressing_type\r\n if instance_type:\r\n params['InstanceType'] = instance_type\r\n if placement:\r\n params['Placement.AvailabilityZone'] = placement\r\n if placement_group:\r\n params['Placement.GroupName'] = placement_group\r\n if kernel_id:\r\n params['KernelId'] = kernel_id\r\n if ramdisk_id:\r\n params['RamdiskId'] = ramdisk_id\r\n if monitoring_enabled:\r\n params['Monitoring.Enabled'] = 'true'\r\n if subnet_id:\r\n params['SubnetId'] = subnet_id\r\n if private_ip_address:\r\n params['PrivateIpAddress'] = private_ip_address\r\n if block_device_map:\r\n block_device_map.build_list_params(params)\r\n if disable_api_termination:\r\n params['DisableApiTermination'] = 'true'\r\n if instance_initiated_shutdown_behavior:\r\n val = instance_initiated_shutdown_behavior\r\n params['InstanceInitiatedShutdownBehavior'] = val\r\n if client_token:\r\n params['ClientToken'] = client_token\r\n return self.get_object('RunInstances', params, Reservation, verb='POST')", "def __init__(__self__, *,\n backend_type: Optional[pulumi.Input['InstanceBackendType']] = None,\n connection_name: Optional[pulumi.Input[str]] = None,\n current_disk_size: Optional[pulumi.Input[str]] = None,\n database_version: Optional[pulumi.Input['InstanceDatabaseVersion']] = None,\n disk_encryption_configuration: Optional[pulumi.Input['DiskEncryptionConfigurationArgs']] = None,\n disk_encryption_status: Optional[pulumi.Input['DiskEncryptionStatusArgs']] = None,\n etag: Optional[pulumi.Input[str]] = None,\n failover_replica: Optional[pulumi.Input['InstanceFailoverReplicaArgs']] = None,\n gce_zone: Optional[pulumi.Input[str]] = None,\n instance_type: Optional[pulumi.Input['InstanceInstanceType']] = None,\n ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input['IpMappingArgs']]]] = None,\n ipv6_address: Optional[pulumi.Input[str]] = None,\n kind: Optional[pulumi.Input[str]] = None,\n maintenance_version: Optional[pulumi.Input[str]] = None,\n master_instance_name: Optional[pulumi.Input[str]] = None,\n max_disk_size: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n on_premises_configuration: Optional[pulumi.Input['OnPremisesConfigurationArgs']] = None,\n out_of_disk_report: Optional[pulumi.Input['SqlOutOfDiskReportArgs']] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n replica_configuration: Optional[pulumi.Input['ReplicaConfigurationArgs']] = None,\n replica_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n root_password: Optional[pulumi.Input[str]] = None,\n satisfies_pzs: Optional[pulumi.Input[bool]] = None,\n scheduled_maintenance: Optional[pulumi.Input['SqlScheduledMaintenanceArgs']] = None,\n secondary_gce_zone: Optional[pulumi.Input[str]] = None,\n self_link: Optional[pulumi.Input[str]] = None,\n server_ca_cert: Optional[pulumi.Input['SslCertArgs']] = None,\n service_account_email_address: Optional[pulumi.Input[str]] = None,\n settings: Optional[pulumi.Input['SettingsArgs']] = None,\n state: 
Optional[pulumi.Input['InstanceState']] = None,\n suspension_reason: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceSuspensionReasonItem']]]] = None):\n if backend_type is not None:\n pulumi.set(__self__, \"backend_type\", backend_type)\n if connection_name is not None:\n pulumi.set(__self__, \"connection_name\", connection_name)\n if current_disk_size is not None:\n pulumi.set(__self__, \"current_disk_size\", current_disk_size)\n if database_version is not None:\n pulumi.set(__self__, \"database_version\", database_version)\n if disk_encryption_configuration is not None:\n pulumi.set(__self__, \"disk_encryption_configuration\", disk_encryption_configuration)\n if disk_encryption_status is not None:\n pulumi.set(__self__, \"disk_encryption_status\", disk_encryption_status)\n if etag is not None:\n warnings.warn(\"\"\"This field is deprecated and will be removed from a future version of the API. Use the `settings.settingsVersion` field instead.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"etag is deprecated: This field is deprecated and will be removed from a future version of the API. Use the `settings.settingsVersion` field instead.\"\"\")\n if etag is not None:\n pulumi.set(__self__, \"etag\", etag)\n if failover_replica is not None:\n pulumi.set(__self__, \"failover_replica\", failover_replica)\n if gce_zone is not None:\n pulumi.set(__self__, \"gce_zone\", gce_zone)\n if instance_type is not None:\n pulumi.set(__self__, \"instance_type\", instance_type)\n if ip_addresses is not None:\n pulumi.set(__self__, \"ip_addresses\", ip_addresses)\n if ipv6_address is not None:\n warnings.warn(\"\"\"The IPv6 address assigned to the instance. (Deprecated) This property was applicable only to First Generation instances.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"ipv6_address is deprecated: The IPv6 address assigned to the instance. 
(Deprecated) This property was applicable only to First Generation instances.\"\"\")\n if ipv6_address is not None:\n pulumi.set(__self__, \"ipv6_address\", ipv6_address)\n if kind is not None:\n pulumi.set(__self__, \"kind\", kind)\n if maintenance_version is not None:\n pulumi.set(__self__, \"maintenance_version\", maintenance_version)\n if master_instance_name is not None:\n pulumi.set(__self__, \"master_instance_name\", master_instance_name)\n if max_disk_size is not None:\n pulumi.set(__self__, \"max_disk_size\", max_disk_size)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if on_premises_configuration is not None:\n pulumi.set(__self__, \"on_premises_configuration\", on_premises_configuration)\n if out_of_disk_report is not None:\n pulumi.set(__self__, \"out_of_disk_report\", out_of_disk_report)\n if project is not None:\n pulumi.set(__self__, \"project\", project)\n if region is not None:\n pulumi.set(__self__, \"region\", region)\n if replica_configuration is not None:\n pulumi.set(__self__, \"replica_configuration\", replica_configuration)\n if replica_names is not None:\n pulumi.set(__self__, \"replica_names\", replica_names)\n if root_password is not None:\n pulumi.set(__self__, \"root_password\", root_password)\n if satisfies_pzs is not None:\n pulumi.set(__self__, \"satisfies_pzs\", satisfies_pzs)\n if scheduled_maintenance is not None:\n pulumi.set(__self__, \"scheduled_maintenance\", scheduled_maintenance)\n if secondary_gce_zone is not None:\n pulumi.set(__self__, \"secondary_gce_zone\", secondary_gce_zone)\n if self_link is not None:\n pulumi.set(__self__, \"self_link\", self_link)\n if server_ca_cert is not None:\n pulumi.set(__self__, \"server_ca_cert\", server_ca_cert)\n if service_account_email_address is not None:\n pulumi.set(__self__, \"service_account_email_address\", service_account_email_address)\n if settings is not None:\n pulumi.set(__self__, \"settings\", settings)\n if state is not None:\n pulumi.set(__self__, \"state\", state)\n if suspension_reason is not None:\n pulumi.set(__self__, \"suspension_reason\", suspension_reason)", "def do_create(self):\n cluster_id = self.entity.cluster_id\n if cluster_id and self.cause == consts.CAUSE_RPC:\n # Check cluster size constraint if target cluster is specified\n cluster = cm.Cluster.load(self.context, cluster_id)\n desired = no.Node.count_by_cluster(self.context, cluster_id)\n result = su.check_size_params(cluster, desired, None, None, True)\n if result:\n # cannot place node into the cluster\n no.Node.update(self.context, self.entity.id,\n {'cluster_id': '', 'status': consts.NS_ERROR})\n return self.RES_ERROR, result\n\n res, reason = self.entity.do_create(self.context)\n\n if cluster_id and self.cause == consts.CAUSE_RPC:\n # Update cluster's desired_capacity and re-evaluate its status no\n # matter the creation is a success or not because the node object\n # is already treated as member of the cluster and the node\n # creation may have changed the cluster's status\n cluster.eval_status(self.context, consts.NODE_CREATE,\n desired_capacity=desired)\n if res:\n return self.RES_OK, 'Node created successfully.'\n else:\n return self.RES_ERROR, reason", "def upgrade():\n op.add_column(\n 'share_instances',\n Column('access_rules_status', String(length=255))\n )\n\n connection = op.get_bind()\n share_instances_table = utils.load_table('share_instances', connection)\n instance_access_table = utils.load_table('share_instance_access_map',\n connection)\n\n # NOTE(u_glide): Data migrations shouldn't be 
performed on live clouds\n # because it will lead to unpredictable behaviour of running operations\n # like migration.\n instances_query = (\n share_instances_table.select()\n .where(share_instances_table.c.status == constants.STATUS_AVAILABLE)\n .where(share_instances_table.c.deleted == 'False')\n )\n\n for instance in connection.execute(instances_query):\n\n access_mappings_query = instance_access_table.select().where(\n instance_access_table.c.share_instance_id == instance['id']\n ).where(instance_access_table.c.deleted == 'False')\n\n status = constants.STATUS_ACTIVE\n\n for access_rule in connection.execute(access_mappings_query):\n\n if (access_rule['state'] == constants.STATUS_DELETING or\n access_rule['state'] not in priorities):\n continue\n\n if priorities[access_rule['state']] > priorities[status]:\n status = access_rule['state']\n\n # pylint: disable=no-value-for-parameter\n op.execute(\n share_instances_table.update().where(\n share_instances_table.c.id == instance['id']\n ).values({'access_rules_status': upgrade_data_mapping[status]})\n )\n\n op.drop_column('share_instance_access_map', 'state')", "def test_live_migration_src_check_volume_node_not_alive(self):\n\n instance_id = self._create_instance()\n i_ref = db.instance_get(self.context, instance_id)\n dic = {'instance_id': instance_id, 'size': 1}\n v_ref = db.volume_create(self.context, {'instance_id': instance_id,\n 'size': 1})\n t1 = utils.utcnow() - datetime.timedelta(1)\n dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume',\n 'topic': 'volume', 'report_count': 0}\n s_ref = db.service_create(self.context, dic)\n\n self.assertRaises(exception.VolumeServiceUnavailable,\n self.scheduler.driver.schedule_live_migration,\n self.context, instance_id, i_ref['host'])\n\n db.instance_destroy(self.context, instance_id)\n db.service_destroy(self.context, s_ref['id'])\n db.volume_destroy(self.context, v_ref['id'])", "def check_runtime_vm(**kwargs):\n\n ti: TaskInstance = kwargs[\"ti\"]\n last_warning_time = ti.xcom_pull(\n key=TerraformTasks.XCOM_WARNING_TIME,\n task_ids=TerraformTasks.TASK_ID_VM_RUNTIME,\n dag_id=TerraformTasks.DAG_ID_DESTROY_VM,\n include_prior_dates=True,\n )\n start_time_vm = ti.xcom_pull(\n key=TerraformTasks.XCOM_START_TIME_VM,\n task_ids=TerraformTasks.TASK_ID_RUN,\n dag_id=TerraformTasks.DAG_ID_CREATE_VM,\n include_prior_dates=True,\n )\n\n if start_time_vm:\n # calculate number of hours passed since start time vm and now\n hours_on = (ti.start_date - start_time_vm).total_seconds() / 3600\n logging.info(\n f\"Start time VM: {start_time_vm}, hours passed since start time: {hours_on}, warning limit: \"\n f\"{TerraformTasks.VM_RUNTIME_H_WARNING}\"\n )\n\n # check if a warning has been sent previously and if so, how many hours ago\n if last_warning_time:\n hours_since_warning = (ti.start_date - last_warning_time).total_seconds() / 3600\n else:\n hours_since_warning = None\n\n # check if the VM has been on longer than the limit\n if hours_on > TerraformTasks.VM_RUNTIME_H_WARNING:\n # check if no warning was sent before or last time was longer ago than warning frequency\n if not hours_since_warning or hours_since_warning > TerraformTasks.WARNING_FREQUENCY_H:\n comments = (\n f\"Worker VM has been on since {start_time_vm}. No. 
hours passed since then: \"\n f\"{hours_on}.\"\n f\" Warning limit: {TerraformTasks.VM_RUNTIME_H_WARNING}H\"\n )\n project_id = Variable.get(AirflowVars.PROJECT_ID)\n slack_hook = create_slack_webhook(comments, project_id, **kwargs)\n\n # http_hook outputs the secret token, suppressing logging 'info' by setting level to 'warning'\n old_levels = change_task_log_level(logging.WARNING)\n slack_hook.execute()\n # change back to previous levels\n change_task_log_level(old_levels)\n\n ti.xcom_push(TerraformTasks.XCOM_WARNING_TIME, ti.start_date)\n else:\n logging.info(f\"Start time VM unknown.\")", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n auto_healing_policies: Optional[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerAutoHealingPoliciesArgs']]] = None,\n base_instance_name: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n distribution_policy_zones: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n named_ports: Optional[pulumi.Input[List[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerNamedPortArgs']]]]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n stateful_disks: Optional[pulumi.Input[List[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerStatefulDiskArgs']]]]] = None,\n target_pools: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,\n target_size: Optional[pulumi.Input[float]] = None,\n update_policy: Optional[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerUpdatePolicyArgs']]] = None,\n versions: Optional[pulumi.Input[List[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerVersionArgs']]]]] = None,\n wait_for_instances: Optional[pulumi.Input[bool]] = None,\n __props__=None,\n __name__=None,\n __opts__=None):\n if __name__ is not None:\n warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning)\n resource_name = __name__\n if __opts__ is not None:\n warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning)\n opts = __opts__\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n if opts.version is None:\n opts.version = _utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = dict()\n\n __props__['auto_healing_policies'] = auto_healing_policies\n if base_instance_name is None:\n raise TypeError(\"Missing required property 'base_instance_name'\")\n __props__['base_instance_name'] = base_instance_name\n __props__['description'] = description\n __props__['distribution_policy_zones'] = distribution_policy_zones\n __props__['name'] = name\n __props__['named_ports'] = named_ports\n __props__['project'] = project\n if region is None:\n raise TypeError(\"Missing required property 'region'\")\n __props__['region'] = region\n __props__['stateful_disks'] = stateful_disks\n __props__['target_pools'] = target_pools\n __props__['target_size'] = target_size\n __props__['update_policy'] = update_policy\n if versions is None:\n raise TypeError(\"Missing required property 'versions'\")\n __props__['versions'] = versions\n __props__['wait_for_instances'] = wait_for_instances\n __props__['fingerprint'] = None\n 
__props__['instance_group'] = None\n __props__['self_link'] = None\n super(RegionInstanceGroupManager, __self__).__init__(\n 'gcp:compute/regionInstanceGroupManager:RegionInstanceGroupManager',\n resource_name,\n __props__,\n opts)", "def get_running_instance_count(body):\n\tbody = dict(body)\n\tconn = autoscale.connect_to_region(region_name='us-west-2',\n\t\t\t\t\t\t\t\taws_access_key_id=body[\"aws_access_key_id\"],\n\t\t\t\t\t\t\t\taws_secret_access_key=body[\"aws_secret_access_key\"])\n\tinstance_status = conn.get_all_autoscaling_instances()\n\trunning_instances = 0\n\tfor item in instance_status:\n\t\tif (item.health_status == 'HEALTHY'):\n\t\t\trunning_instances += 1\n\treturn {\"data\":running_instances}", "def check_stability(self):", "def test_too_many_cores(self):\n compute1 = self.start_service('compute', host='host1')\n compute2 = self.start_service('compute', host='host2')\n instance_ids1 = []\n instance_ids2 = []\n for index in xrange(FLAGS.max_cores):\n instance_id = self._create_instance()\n compute1.run_instance(self.context, instance_id)\n instance_ids1.append(instance_id)\n instance_id = self._create_instance()\n compute2.run_instance(self.context, instance_id)\n instance_ids2.append(instance_id)\n instance_id = self._create_instance()\n self.assertRaises(driver.NoValidHost,\n self.scheduler.driver.schedule_run_instance,\n self.context,\n instance_id)\n db.instance_destroy(self.context, instance_id)\n for instance_id in instance_ids1:\n compute1.terminate_instance(self.context, instance_id)\n for instance_id in instance_ids2:\n compute2.terminate_instance(self.context, instance_id)\n compute1.kill()\n compute2.kill()", "def scale_up_application(asg_name):\n if_verbose(\"Scaling up %s in steps of %d\" % (asg_name, args.instance_count_step))\n current_capacity_count = args.instance_count_step\n while(True):\n check_error(scale_up_autoscaling_group(asg_name, current_capacity_count))\n check_error(check_autoscaling_group_health(asg_name, current_capacity_count))\n\n if args.elb_name:\n asg_instances = [{\"InstanceId\": a[\"InstanceId\"]} for a in asg.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name], MaxRecords=1)[\"AutoScalingGroups\"][0][\"Instances\"]]\n check_error(check_elb_instance_health(args.elb_name, asg_instances))\n\n if args.instance_count == current_capacity_count:\n break\n else:\n current_capacity_count += args.instance_count_step\n else:\n break\n\n if_verbose(\"Scaling up %s successful\" % asg_name)", "def test_delete_cluster_resource_quota(self):\n pass", "def test_assign_configuration_to_valid_instance(self):\n print(\"instance_info.id: %s\" % instance_info.id)\n print(\"configuration_info: %s\" % configuration_info)\n print(\"configuration_info.id: %s\" % configuration_info.id)\n config_id = configuration_info.id\n instance_info.dbaas.instances.modify(instance_info.id,\n configuration=config_id)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)", "def main():\n rs = redshift(config_file=CONFIG_FILENAME)\n \n # check if cluster already available\n try:\n clust_avail = check_available(rs)\n except rs_client.exceptions.ClusterNotFoundFault:\n clust_avail = False\n\n # if cluster not available, create it\n if not clust_avail:\n create_cluster(rs) \n \n print(f'Cluster is available. 
Cluster information: \\n{rs.get_cluster_info()}')", "def run(self):\n client = self._get_client()\n metadata = self._metadata\n\n if str(metadata.get('cluster')) == str(self._cluster_id):\n if str(self._uuid) == str(self._node_id):\n #hypervisor_hostname = client.servers.find(\n # id=self._uuid).to_dict()['OS-EXT-SRV-ATTR:hypervisor_hostname']\n hypervisor_hostname = client.servers.find(\n id=self._uuid).to_dict()['OS-EXT-SRV-ATTR:host']\n data = {\n 'migrate': True,\n 'uuid': self._uuid,\n 'hypervisor_hostname': hypervisor_hostname,\n 'flavor_id': self._flavor\n }\n return Result(data)\n\n data = {\n 'migrate': False,\n 'uuid': self._uuid,\n 'hypervisor_hostname': '',\n 'flavor_id':self._flavor\n }\n return Result(data)", "def launch_instance_vpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n subnet_id,\n security_group_id,\n machine_type = 'm3.medium',\n user_data = None,\n wait_for_running = True,\n public_ip = False,\n static_ip_address = None,\n monitor_params = None ) :\n interfaces = None\n subnet = None\n security_group_ids = None\n \n if static_ip_address is None:\n spec = boto.ec2.networkinterface.NetworkInterfaceSpecification( subnet_id = subnet_id,\n groups = [ security_group_id ],\n associate_public_ip_address = public_ip )\n interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection( spec )\n else:\n subnet = subnet_id\n security_group_ids = [security_group_id]\n\n instance_r = ec2_conn.run_instances( image_id = ami.id,\n key_name = keypair,\n instance_type = machine_type,\n monitoring_enabled = True,\n network_interfaces = interfaces,\n subnet_id = subnet, \n user_data = user_data,\n security_group_ids = security_group_ids,\n private_ip_address = static_ip_address )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n \n print \"Waiting for instance to be ready\"\n \n if wait_for_running :\n running = wait_on_object_state( instance, 'running', max_wait = 600, failure_state = 'terminated' )\n if not running :\n print \"Deployment instance still not up after long period of time! Exiting...\"\n sys.exit( 3 )\n\n if monitor_params :\n print \"Adding monitoring to the instance.\"\n\n return instance", "def update(self, validate=False):\r\n rs = self.connection.get_all_dbinstances(self.id)\r\n if len(rs) > 0:\r\n for i in rs:\r\n if i.id == self.id:\r\n self.__dict__.update(i.__dict__)\r\n elif validate:\r\n raise ValueError('%s is not a valid Instance ID' % self.id)\r\n return self.status", "def db_healthcheck() -> bool:\n\n try:\n result = query_db(\"Select 1\")\n app.logfile.info(\"Select 1\")\n return True\n except ConnectionError as err:\n app.logger.error(err)\n return False" ]
[ "0.61979055", "0.59958446", "0.5994143", "0.59490556", "0.5910241", "0.57950866", "0.5756366", "0.57093143", "0.56927115", "0.56697446", "0.56413835", "0.5618511", "0.55941415", "0.553929", "0.55206215", "0.5492208", "0.54921925", "0.54788786", "0.54787785", "0.5460842", "0.5450891", "0.5448313", "0.5404326", "0.5380374", "0.5376097", "0.53686094", "0.53631175", "0.5359793", "0.5329645", "0.5326954", "0.52969223", "0.52871245", "0.52844405", "0.5277203", "0.52731144", "0.5260457", "0.52599007", "0.5259319", "0.52466697", "0.5220914", "0.5204484", "0.5204305", "0.5201842", "0.51994777", "0.5196366", "0.51891136", "0.5189112", "0.5181033", "0.5180682", "0.5175264", "0.5170644", "0.5169792", "0.51666325", "0.51513404", "0.5149116", "0.51479197", "0.51453775", "0.51327753", "0.5127961", "0.5127489", "0.5124122", "0.5119716", "0.5103683", "0.50997496", "0.5098941", "0.5090552", "0.5089925", "0.5073289", "0.5070702", "0.5069236", "0.5062518", "0.5050799", "0.504799", "0.50465244", "0.5045154", "0.5037056", "0.5034119", "0.50318974", "0.5021108", "0.5019347", "0.5018738", "0.501726", "0.5016553", "0.5012556", "0.499607", "0.49827093", "0.4969785", "0.49653217", "0.4963236", "0.49597764", "0.49588728", "0.49472067", "0.49340862", "0.4932115", "0.49272308", "0.4926224", "0.49244505", "0.49226087", "0.4914747", "0.490748", "0.49041933" ]
0.0
-1
This operation is applicable to replica set instances and sharded cluster instances. You can call this operation to check whether resources are sufficient for creating an instance, upgrading a replica set or sharded cluster instance, or upgrading a single node of a sharded cluster instance. > You can call this operation up to 200 times per minute.
def evaluate_resource( self, request: dds_20151201_models.EvaluateResourceRequest, ) -> dds_20151201_models.EvaluateResourceResponse: runtime = util_models.RuntimeOptions() return self.evaluate_resource_with_options(request, runtime)
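A minimal usage sketch for the evaluate_resource wrapper shown above, assuming a Client configured through alibabacloud_tea_openapi. The endpoint, credentials, and EvaluateResourceRequest fields below are illustrative assumptions, not documented values; check the models module for the real parameter set.

# Minimal sketch, assuming the alibabacloud_dds20151201 SDK layout; request
# fields are hypothetical placeholders rather than a verified parameter list.
from alibabacloud_dds20151201.client import Client
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_tea_openapi import models as open_api_models

config = open_api_models.Config(
    access_key_id='<your-access-key-id>',          # placeholder credential
    access_key_secret='<your-access-key-secret>',  # placeholder credential
    endpoint='mongodb.cn-hangzhou.aliyuncs.com',   # assumed regional endpoint
)
client = Client(config)

request = dds_20151201_models.EvaluateResourceRequest(
    region_id='cn-hangzhou',           # assumption: region of the planned instance
    engine='MongoDB',                  # assumption: database engine
    engine_version='4.4',              # assumption: engine version to evaluate
    dbinstance_class='dds.mongo.mid',  # assumption: target instance class
)
response = client.evaluate_resource(request)
print(response.body)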
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_smartstack_replication_for_instance(\n instance_config,\n expected_count,\n smartstack_replication_checker,\n):\n\n crit_threshold = instance_config.get_replication_crit_percentage()\n\n log.info('Checking instance %s in smartstack', instance_config.job_id)\n smartstack_replication_info = \\\n smartstack_replication_checker.get_replication_for_instance(instance_config)\n\n log.debug('Got smartstack replication info for %s: %s' %\n (instance_config.job_id, smartstack_replication_info))\n\n if len(smartstack_replication_info) == 0:\n status = pysensu_yelp.Status.CRITICAL\n output = (\n 'Service %s has no Smartstack replication info. Make sure the discover key in your smartstack.yaml '\n 'is valid!\\n'\n ) % instance_config.job_id\n log.error(output)\n else:\n expected_count_per_location = int(expected_count / len(smartstack_replication_info))\n output = ''\n output_critical = ''\n output_ok = ''\n under_replication_per_location = []\n\n for location, available_backends in sorted(smartstack_replication_info.items()):\n num_available_in_location = available_backends.get(instance_config.job_id, 0)\n under_replicated, ratio = is_under_replicated(\n num_available_in_location, expected_count_per_location, crit_threshold,\n )\n if under_replicated:\n output_critical += '- Service %s has %d out of %d expected instances in %s (CRITICAL: %d%%)\\n' % (\n instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,\n )\n else:\n output_ok += '- Service %s has %d out of %d expected instances in %s (OK: %d%%)\\n' % (\n instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,\n )\n under_replication_per_location.append(under_replicated)\n\n output += output_critical\n if output_critical and output_ok:\n output += '\\n\\n'\n output += 'The following locations are OK:\\n'\n output += output_ok\n\n if any(under_replication_per_location):\n status = pysensu_yelp.Status.CRITICAL\n output += (\n \"\\n\\n\"\n \"What this alert means:\\n\"\n \"\\n\"\n \" This replication alert means that a SmartStack powered loadbalancer (haproxy)\\n\"\n \" doesn't have enough healthy backends. Not having enough healthy backends\\n\"\n \" means that clients of that service will get 503s (http) or connection refused\\n\"\n \" (tcp) when trying to connect to it.\\n\"\n \"\\n\"\n \"Reasons this might be happening:\\n\"\n \"\\n\"\n \" The service may simply not have enough copies or it could simply be\\n\"\n \" unhealthy in that location. There also may not be enough resources\\n\"\n \" in the cluster to support the requested instance count.\\n\"\n \"\\n\"\n \"Things you can do:\\n\"\n \"\\n\"\n \" * You can view the logs for the job with:\\n\"\n \" paasta logs -s %(service)s -i %(instance)s -c %(cluster)s\\n\"\n \"\\n\"\n \" * Fix the cause of the unhealthy service. 
Try running:\\n\"\n \"\\n\"\n \" paasta status -s %(service)s -i %(instance)s -c %(cluster)s -vv\\n\"\n \"\\n\"\n \" * Widen SmartStack discovery settings\\n\"\n \" * Increase the instance count\\n\"\n \"\\n\"\n ) % {\n 'service': instance_config.service,\n 'instance': instance_config.instance,\n 'cluster': instance_config.cluster,\n }\n log.error(output)\n else:\n status = pysensu_yelp.Status.OK\n log.info(output)\n send_event(instance_config=instance_config, status=status, output=output)", "def check(key):\n instance = key.get()\n if not instance:\n logging.warning('Instance does not exist: %s', key)\n return\n\n if not instance.active_metadata_update:\n logging.warning('Instance active metadata operation unspecified: %s', key)\n return\n\n if not instance.active_metadata_update.url:\n logging.warning(\n 'Instance active metadata operation URL unspecified: %s', key)\n return\n\n now = utils.utcnow()\n result = net.json_request(\n instance.active_metadata_update.url, scopes=gce.AUTH_SCOPES)\n if result['status'] != 'DONE':\n return\n\n if result.get('error'):\n logging.warning(\n 'Instance metadata operation failed: %s\\n%s',\n key,\n json.dumps(result, indent=2),\n )\n metrics.instance_set_metadata_time.add(\n (now - instance.active_metadata_update.operation_ts).total_seconds(),\n fields={\n 'success': False,\n 'zone': instance.instance_group_manager.id(),\n },\n )\n metrics.send_machine_event('METADATA_UPDATE_FAILED', instance.hostname)\n reschedule_active_metadata_update(key, instance.active_metadata_update.url)\n metrics.send_machine_event('METADATA_UPDATE_READY', instance.hostname)\n else:\n metrics.instance_set_metadata_time.add(\n (now - instance.active_metadata_update.operation_ts).total_seconds(),\n fields={\n 'success': True,\n 'zone': instance.instance_group_manager.id(),\n },\n )\n metrics.send_machine_event('METADATA_UPDATE_SUCCEEDED', instance.hostname)\n clear_active_metadata_update(key, instance.active_metadata_update.url)", "def check_status():\n js = _get_jetstream_conn()\n i = js.compute.instances.get(session.attributes.get('instance_id'))\n if not i:\n return question(\"There was a problem. 
Please retry your command.\")\n\n status = i.state\n if session.attributes['status'] != status:\n msg = \"New instance status is {0}.\".format(status)\n if not session.attributes['public_ip'] and status == 'running':\n # Attach a floating IP to the instance\n fip = None\n fips = js.network.floating_ips()\n for ip in fips:\n if not ip.in_use():\n fip = ip\n if fip:\n i.add_floating_ip(fip.public_ip)\n session.attributes['public_ip'] = fip.public_ip\n else:\n msg = \"Instance status is {0}\".format(status)\n\n session.attributes['status'] = status\n\n if session.attributes['status'] != 'running':\n q = \"Would you like to check the status again?\"\n return question(msg + q).reprompt(q)\n else:\n card_content = 'Access your instance at http://{0}'.format(\n session.attributes.get('public_ip'))\n return statement(msg).simple_card(\n title=\"Instance {0} was launched.\".format(i.name),\n content=msg + card_content)", "def instanceha_deployed():\n if overcloud.has_overcloud():\n return get_overcloud_nodes_running_pcs_resource(\n resource='nova-evacuate')\n else:\n return False", "def test_instance_not_overscaled(self) -> None:\n if self.prod_env:\n ec2_name = 'saints-xctf-server-prod-asg'\n else:\n ec2_name = 'saints-xctf-server-dev-asg'\n\n instances = self.get_ec2(ec2_name)\n self.assertTrue(len(instances) < 3)", "def instances_availability(self, lastsubmitedinstance, metrics):\n connection = self.connection\n instancesconfig = self.instancesconfigs\n\n cur = connection.cursor()\n harvesters = instancesconfig.keys()\n connection.row_factory = sqlite3.Row\n\n for harvesterid in harvesters:\n error_text = set()\n\n instanceisenable = self.__str_to_bool(instancesconfig[harvesterid]['instanceisenable'])\n del instancesconfig[harvesterid]['instanceisenable']\n ### Instance is enable ###\n if instanceisenable:\n for host in instancesconfig[harvesterid].keys():\n avaibility = []\n if self.__str_to_bool(instancesconfig[harvesterid][host]['hostisenable']):\n ### No submitted worker ###\n timedelta_submitted = timedelta(minutes=30)\n if host != 'none' and host in instancesconfig[harvesterid] \\\n and self.__str_to_bool( instancesconfig[harvesterid][host]['metrics']['lastsubmittedworker']['enable']):\n timedelta_submitted = self.__get_timedelta(\n instancesconfig[harvesterid][host]['metrics']['lastsubmittedworker']['value'])\n if lastsubmitedinstance[harvesterid]['harvesterhost'][host][\n 'harvesterhostmaxtime'] < datetime.utcnow() - timedelta_submitted:\n error = \"Last submitted worker was {0}\".format(\n str(lastsubmitedinstance[harvesterid]['harvesterhost'][host][\n 'harvesterhostmaxtime'])) + '\\n'\n error_text.add(error)\n avaibility.append(0)\n if harvesterid in metrics:\n ### No heartbeat ###\n heartbeattime = metrics[harvesterid][host].keys()[0]\n contacts = instancesconfig[harvesterid][host]['contacts']\n timedelta_heartbeat = self.__get_timedelta(instancesconfig[harvesterid][host]['metrics']['lastheartbeat']['value'])\n if self.__str_to_bool( instancesconfig[harvesterid][host]['metrics']['lastheartbeat']['enable']) and \\\n heartbeattime < datetime.utcnow() - timedelta_heartbeat:\n error = \"Last heartbeat was {0}\".format(\n str(heartbeattime)) + '\\n'\n error_text.add(error)\n avaibility.append(0)\n\n #### Metrics ####\n memory = instancesconfig[harvesterid][host]['memory']\n cpu_warning = instancesconfig[harvesterid][host]['metrics']['cpu']['cpu_warning']\n cpu_critical = instancesconfig[harvesterid][host]['metrics']['cpu']['cpu_critical']\n disk_warning = 
instancesconfig[harvesterid][host]['metrics']['disk']['disk_warning']\n disk_critical = instancesconfig[harvesterid][host]['metrics']['disk']['disk_critical']\n memory_warning = instancesconfig[harvesterid][host]['metrics']['memory']['memory_warning']\n memory_critical = instancesconfig[harvesterid][host]['metrics']['memory']['memory_critical']\n\n cpu_enable = self.__str_to_bool(\n instancesconfig[harvesterid][host]['metrics']['cpu']['enable'])\n disk_enable = self.__str_to_bool(\n instancesconfig[harvesterid][host]['metrics']['disk']['enable'])\n memory_enable = self.__str_to_bool(\n instancesconfig[harvesterid][host]['metrics']['memory']['enable'])\n\n #### Metrics DB ####\n for metric in metrics[harvesterid][host][heartbeattime]:\n #### CPU ####\n if cpu_enable:\n cpu_pc = int(metric['cpu_pc'])\n if cpu_pc >= cpu_warning:\n avaibility.append(50)\n error = \"Warning! CPU utilization: {0}\".format(\n str(cpu_pc)) + '\\n'\n error_text.add(error)\n elif cpu_pc >= cpu_critical:\n avaibility.append(10)\n error = \"CPU utilization: {0}\".format(\n str(cpu_pc)) + '\\n'\n error_text.add(error)\n #### Memory ####\n if memory_enable:\n if 'memory_pc' in metric:\n memory_pc = int(metric['memory_pc'])\n else:\n memory_pc = int(self.__get_change(metric['rss_mib'], memory))\n if memory_pc >= memory_warning:\n avaibility.append(50)\n error = \"Warning! Memory consumption: {0}\".format(\n str(memory_pc)) + '\\n'\n error_text.add(error)\n elif memory_pc >= memory_critical:\n avaibility.append(0)\n error = \"Memory consumption: {0}\".format(\n str(memory_pc)) + '\\n'\n error_text.add(error)\n #### HDD&HDD1 ####\n if disk_enable:\n if 'volume_data_pc' in metric:\n volume_data_pc = int(metric['volume_data_pc'])\n else:\n volume_data_pc = -1\n if volume_data_pc >= disk_warning:\n avaibility.append(50)\n error = \"Warning! Disk utilization: {0}\".format(\n str(volume_data_pc)) + '\\n'\n error_text.add(error)\n elif volume_data_pc >= disk_critical:\n avaibility.append(10)\n error = \"Disk utilization: {0}\".format(\n str(volume_data_pc)) + '\\n'\n error_text.add(error)\n if 'volume_data1_pc' in metric:\n volume_data1_pc = int(metric['volume_data1_pc'])\n if volume_data1_pc >= disk_warning:\n avaibility.append(50)\n error = \"Warning! 
Disk 1 utilization: {0}\".format(\n str(volume_data1_pc)) + '\\n'\n error_text.add(error)\n elif volume_data1_pc >= disk_critical:\n avaibility.append(10)\n error = \"Disk 1 utilization: {0}\".format(\n str(volume_data1_pc)) + '\\n'\n error_text.add(error)\n try:\n cur.execute(\"insert into INSTANCES values (?,?,?,?,?,?,?,?,?)\",\n (str(harvesterid), str(host),\n str(lastsubmitedinstance[harvesterid]['harvesterhost'][host]['harvesterhostmaxtime']),\n heartbeattime, 1, 0, min(avaibility) if len(avaibility) > 0 else 100, str(contacts), ', '.join(str(e) for e in error_text)))\n connection.commit()\n error_text = set()\n except:\n query = \\\n \"\"\"UPDATE INSTANCES \n SET lastsubmitted = '{0}', active = {1}, availability = {2}, lastheartbeat = '{3}', contacts = '{4}', errorsdesc = '{5}'\n WHERE harvesterid = '{6}' and harvesterhost = '{7}'\n \"\"\".format(str(lastsubmitedinstance[harvesterid]['harvesterhost'][host]['harvesterhostmaxtime']),\n 1, min(avaibility) if len(avaibility) > 0 else 100, heartbeattime, str(contacts), ', '.join(str(e) for e in error_text), str(harvesterid),\n str(host))\n cur.execute(query)\n connection.commit()\n error_text = set()\n else:\n cur.execute(\"DELETE FROM INSTANCES WHERE harvesterid = ?\", [str(harvesterid)])\n connection.commit()", "def run():\r\n if args.action == \"create\":\r\n if args.customer_id and args.node_type:\r\n my_aws = createaws()\r\n node_id = my_aws.create_ec2_instance(args.customer_id, args.node_type)\r\n print(\"Node Created: \", node_id, \"\\n\")\r\n return True\r\n else:\r\n print(\"Missed command parameters for Instance Creation\")\r\n return False\r\n\r\n elif args.action == \"list-nodes\":\r\n if args.customer_id:\r\n my_aws = createaws()\r\n instance_lst = my_aws.get_instance_by_customer_id(args.customer_id)\r\n print(\"Customer\", args.customer_id, \"has \" + str(len(instance_lst)) + \" Instances: \", \",\".join(instance_lst)\r\n ,\"\\n\")\r\n return True\r\n else:\r\n print(\"Missed command parameters for Instance Listing\")\r\n return False\r\n\r\n elif args.action == \"list-all\":\r\n my_aws = createaws()\r\n cust_inst_ip = my_aws.get_all_instances()\r\n print(\"All the Instances: customer_id, instance_id, instance_ip formatted\\n\")\r\n if len(cust_inst_ip) > 0:\r\n for rec in cust_inst_ip:\r\n print(', '.join(rec))\r\n else:\r\n print(\"No Instances!\")\r\n return False\r\n return True\r\n\r\n elif args.action == \"execute\":\r\n instance_ids, succ_id_list, not_worked_is_list, outs = [], [], [], []\r\n if args.script and (args.customer_id or args.node_type):\r\n my_aws = createaws()\r\n commands = args.script\r\n if args.customer_id:\r\n instance_ids.extend(my_aws.get_instance_by_customer_id(args.customer_id))\r\n if args.node_type:\r\n instance_ids.extend(my_aws.get_instance_by_node_type(args.node_type))\r\n instance_ids = list(set(instance_ids))\r\n\r\n succ_id_list, not_worked_is_list, outs = \\\r\n my_aws.execute_commands_on_linux_instances(commands, instance_ids)\r\n print(\"\\nInstances that run the commands:\\n\", '\\n '.join(succ_id_list))\r\n print(\"\\nInstances that don't run the commands: (Instance is not running or its SSM agent doesn't work or \"\r\n \"command couldn't be executed\\n\", '\\n '.join(not_worked_is_list))\r\n print(\"\\nOutputs of the Instances that run the commands:\")\r\n for i in outs:\r\n print(\"\\n\")\r\n for k, v in dict(i).items():\r\n print(str(k).lstrip(), \"-->\", str(v).replace('\\n', \"\"))\r\n return True\r\n else:\r\n print(\"Missed command parameters for Execution on Instance\")\r\n 
return False\r\n\r\n elif args.action == \"backup\":\r\n if args.node_id:\r\n my_aws = createaws()\r\n s_id = my_aws.make_backup(args.node_id)\r\n print(s_id)\r\n else:\r\n return False\r\n\r\n elif args.action == \"list-backups\":\r\n if args.node_id:\r\n my_aws = createaws()\r\n backup_list = my_aws.list_backups(args.node_id)\r\n if len(backup_list) > 0:\r\n for rec in backup_list:\r\n print(', '.join(rec))\r\n return True\r\n else:\r\n print(\"Snapshot yok !\")\r\n return True\r\n else:\r\n return False\r\n\r\n elif args.action == \"roll-back\":\r\n if args.backup_id:\r\n my_aws = createaws()\r\n my_aws.roll_back(args.backup_id, args.node_id)\r\n elif args.action == \"terminate-all\":\r\n my_aws = createaws()\r\n my_aws.terminate_instances('ALL')\r\n else:\r\n print(\"Please select a proper action\")", "def allowed_instances(context, requested_instances, instance_type):\n project_id = context.project_id\n context = context.elevated()\n requested_cores = requested_instances * instance_type['vcpus']\n requested_ram = requested_instances * instance_type['memory_mb']\n usage = db.instance_data_get_for_project(context, project_id)\n used_instances, used_cores, used_ram = usage\n quota = get_project_quotas(context, project_id)\n allowed_instances = _get_request_allotment(requested_instances,\n used_instances,\n quota['instances'])\n allowed_cores = _get_request_allotment(requested_cores, used_cores,\n quota['cores'])\n allowed_ram = _get_request_allotment(requested_ram, used_ram, quota['ram'])\n allowed_instances = min(allowed_instances,\n allowed_cores // instance_type['vcpus'],\n allowed_ram // instance_type['memory_mb'])\n return min(requested_instances, allowed_instances)", "def test_create_cluster_resource_quota(self):\n pass", "def check_available(rs):\n info = rs.get_cluster_info()\n if info['ClusterStatus'] == 'available':\n return True\n elif info['ClusterStatus'] == 'deleting':\n raise AttributeError(f'Cluster is currently deleting. Please wait and try again.\\n{info}')\n elif info['ClusterStatus'] == 'creating': \n return False\n \n raise NameError(f'Could not get cluster status information. Current information about the cluster: \\n{info}')", "def test_patch_cluster_resource_quota_status(self):\n pass", "def check_autoscaling_group_health(asg_name, current_capacity_count):\n if_verbose(\"Checking the health of ASG %s\" % asg_name)\n timer = time.time()\n while(True):\n if_verbose(\"Sleeping for %d seconds whilst waiting for ASG health\" % args.update_timeout)\n time.sleep(args.update_timeout)\n\n if int(time.time() - timer) >= args.health_check_timeout:\n return \"Health check timer expired on asg_instances count. 
A manual clean up is likely.\"\n\n completed_instances = 0\n asg_instances = asg.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name], MaxRecords=1)[\"AutoScalingGroups\"][0][\"Instances\"]\n\n while(len(asg_instances) != current_capacity_count):\n if_verbose(\"Waiting for all of %s's instances (%d) to appear healthy\" % (asg_name, args.instance_count_step))\n time.sleep(args.update_timeout)\n asg_instances = asg.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name], MaxRecords=1)[\"AutoScalingGroups\"][0][\"Instances\"]\n\n for instance in asg_instances:\n if_verbose(\"Progress of ASG instance %s: %s\" % (instance[\"InstanceId\"], instance[\"LifecycleState\"]))\n\n if instance[\"LifecycleState\"] == \"InService\":\n completed_instances += 1\n\n if completed_instances >= len(asg_instances):\n if_verbose(\"We have %d healthy nodes and we wanted %d - moving on.\" % (completed_instances, len(asg_instances)))\n break\n else:\n completed_instances = 0\n\n if_verbose(\"ASG %s is healthy\" % asg_name)\n return None", "def test_replace_cluster_resource_quota_status(self):\n pass", "def test_update_instance_limit(self):\n pass", "def test_update_instance_limit1(self):\n pass", "def check_number_of_instances(self):\r\n\r\n if RecomendationDBManagement.management_instances_created != 0:\r\n raise ValueError(\"There can only be one database manager\")\r\n else:\r\n RecomendationDBManagement.management_instances_created = RecomendationDBManagement.management_instances_created + 1", "def test_read_cluster_resource_quota_status(self):\n pass", "def test_patch_cluster_resource_quota(self):\n pass", "def test_non_additive_instance_creation(self):\n\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import launch_instances, run_block_device_dict, farm_security_group_setup\n\n # launch_instances requires vpc setup as done by firesim/scripts/setup_firesim.py\n from awstools.aws_setup import aws_setup\n aws_setup()\n farm_security_group_setup()\n\n type = 'f1.2xlarge'\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags = {'fsimcluster': 'testcluster'},\n always_expand=False)\n instances.should.have.length_of(1)\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags = {'fsimcluster': 'testcluster'},\n always_expand=False)\n instances.should.have.length_of(1)\n\n # There should be one instance total now, across one reservation\n ec2_client = boto3.client('ec2')\n paginator = ec2_client.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n all_reservations.should.have.length_of(1)\n\n [i for r in all_reservations for i in r['Instances']].should.have.length_of(1)", "def dvs_instances_batch_mix_sg(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n self.show_step(2)\n net_1 = os_conn.create_network(\n 
network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network is created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(3)\n self.show_step(4)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n self.icmp[\"security_group_rule\"][\"security_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"remote_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n self.icmp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n\n for sg in [sg1, sg2]:\n self.tcp[\"security_group_rule\"][\"security_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"remote_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n self.tcp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n\n # Add rules for ssh and ping\n os_conn.goodbye_security()\n _s_groups = os_conn.neutron.list_security_groups()['security_groups']\n _srv_tenant_id = os_conn.get_tenant(SERVTEST_TENANT).id\n default_sg = [sg for sg in _s_groups\n if sg['tenant_id'] == _srv_tenant_id and\n sg['name'] == 'default'][0]\n\n for step in (5, 9):\n self.show_step(step) # step 5, 9\n step += 1\n self.show_step(step) # step 6, 10\n step += 1\n\n openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=vm_count,\n security_groups=[sg1.name, sg2.name, default_sg['name']])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(step) # step 7, 11\n step += 1\n\n instances = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, instances)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair,\n timeout=60 * 5,\n interval=10)\n\n self.show_step(step) # step 8, 12\n step += 1\n\n for instance in instances:\n os_conn.nova.servers.delete(instance)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(instance)", "def test_replace_cluster_resource_quota(self):\n pass", "def _Exists(self, instance_only: bool = False) -> bool:\n cmd = util.GcloudCommand(self, 'spanner', 'instances', 'describe',\n self.name)\n\n # Do not log error or warning when checking existence.\n _, _, retcode = cmd.Issue(suppress_warning=True, raise_on_failure=False)\n if retcode != 0:\n logging.info('Could not find GCP Spanner instance %s.', self.name)\n return False\n\n if instance_only:\n return True\n\n cmd = util.GcloudCommand(self, 'spanner', 'databases', 'describe',\n self.database)\n cmd.flags['instance'] = self.name\n\n # Do not log 
error or warning when checking existence.\n _, _, retcode = cmd.Issue(suppress_warning=True, raise_on_failure=False)\n if retcode != 0:\n logging.info('Could not find GCP Spanner database %s.', self.database)\n return False\n\n return True", "def test_redis_increase_replica_count_usual_case():", "def scale_up(nvms, valid_hostnames=None, vms_allegedly_running=0):\n\n global api, img, flavor, user_data, network, owned_instances\n\n # Try to get image if necessary\n if img is None:\n img = image(cf['api']['image_id'])\n if img is None:\n logging.error(\"Cannot scale up: image id %s not found\" % image(cf['api']['image_id']))\n return []\n\n n_succ = 0\n n_fail = 0\n logging.info(\"We need %d more VMs...\" % nvms)\n\n inst = running_instances(valid_hostnames)\n if inst is None:\n logging.error(\"No list of instances can be retrieved from API\")\n return []\n\n n_running_vms = len(inst) + vms_allegedly_running # number of *total* VMs running (also the ones *not* owned by HTCondor)\n if cf['quota']['max_vms'] >= 1:\n # We have a \"soft\" quota: respect it\n n_vms_to_start = int(min(nvms, cf['quota']['max_vms']-n_running_vms))\n if n_vms_to_start <= 0:\n logging.warning(\"Over quota (%d VMs already running out of %d): cannot launch any more VMs\" % \\\n (n_running_vms,cf['quota']['max_vms']))\n else:\n logging.warning(\"Quota enabled: requesting %d (out of desired %d) VMs\" % (n_vms_to_start,nvms))\n else:\n n_vms_to_start = int(nvms)\n\n # Launch VMs\n inst_ok = []\n for i in range(1, n_vms_to_start+1):\n\n success = False\n if int(cf['debug']['dry_run_boot_vms']) == 0:\n try:\n # Returns the reservation\n new_inst_id = img.run(\n token_id=api.keystone.token_id,\n key_name=cf['api']['key_name'],\n user_data=user_data,\n instance_type=flavor.id,\n network=network.id\n )\n\n # Get the single instance ID from the reservation\n owned_instances.append( new_inst_id )\n inst_ok.append( new_inst_id )\n\n success = True\n except Exception:\n logging.error(\"Cannot run instance via API: check your \\\"hard\\\" quota\")\n\n else:\n logging.info(\"Not running VM: dry run active\")\n success = True\n\n if success:\n n_succ+=1\n logging.info(\"VM launched OK. Requested: %d/%d | Success: %d | Failed: %d | ID: %s\" % \\\n (i, n_vms_to_start, n_succ, n_fail, new_inst_id))\n else:\n n_fail+=1\n logging.info(\"VM launch fail. 
Requested: %d/%d | Success: %d | Failed: %d\" % \\\n (i, n_vms_to_start, n_succ, n_fail))\n\n # Dump owned instances to file (if something changed)\n if n_succ > 0:\n save_owned_instances()\n\n return inst_ok", "def ec2_status(resource, metadata, return_count=False):\n\n instances = resource.instances.filter(\n Filters=[{'Name': 'tag:Name', 'Values': [metadata['fqdn']]},\n {'Name': 'instance-state-name', 'Values': ['pending', 'running']}, ])\n\n # get a count of the instances and then either return count or print results\n count = sum(1 for _ in instances)\n if return_count:\n # return count for conditional consumption in other functions\n return count\n else:\n # print for human consumption\n if count == 0:\n print(\"No instances running\")\n else:\n print(count, \"instances running\")\n print('{:20} {:15} {:22} {:18} {}'.format(\n 'instance_id', 'state', 'instance_name', 'public_ip_address', 'instance_role'))\n for instance in instances:\n # tags order does not deterministically stay from run to run and stored as list of dicts\n # tags = {instance.tags[0]['Key']: instance.tags[0]['Value'],\n # instance.tags[1]['Key']: instance.tags[1]['Value']}\n # probably there is a much better way to map this but let's make it a dict of tags\n tags = {}\n for tag in instance.tags:\n tags[tag['Key']] = tag['Value']\n\n print('{:20} {:15} {:22} {:18} {}'.format(\n instance.id, instance.state['Name'], tags['Name'],\n instance.public_ip_address, tags['Role']))", "def test_read_cluster_resource_quota(self):\n pass", "def pre_upgrade_checks(self):\n\n #HostOverview\n Logger.info(\"******************************************************************************************************************************************************\")\n Logger.info(\"\\t\\t\\t\\t\\t\\t\\tHOST OVERVIEW\")\n Logger.info(\"******************************************************************************************************************************************************\")\n print (\"\\n\")\n Logger.info(\"Ambari version\\t\\t:{0}\".format(self.ambari_version))\n\n #Check OS\n os = platform.dist()\n if os[1] != None:\n Logger.info(\"Operating System\\t\\t:{0} {1} - {2}\".format(os[0],os[1],os[2]))\n else:\n Logger.error(\"Unable to fetch OS details.\")\n self.terminate()\n return\n\n self.check_java_version()\n self.check_exactly_one_current_version()\n\n\n #Check if rack awareness is enabled ?\n rack_awareness = \"SELECT DISTINCT rack_info FROM hosts WHERE rack_info!='/default-rack';\"\n self.cursor.execute(rack_awareness)\n result = self.cursor.fetchone()\n if result is None or len(result) != 1:\n Logger.info(\"Rack Awareness ?\\t\\tNo\\n\")\n else:\n Logger.info(\"Rack Awareness ?\\t\\tYes\\n\")\n\n #Security Overview\n self.check_security()\n\n #Check High Availability configuration\n self.check_high_availability()\n\n #Check Metastores\n self.check_metastore()", "def _check_upgrade(dbapi, host_obj=None):\n is_upgrading, upgrade = is_upgrade_in_progress(dbapi)\n if is_upgrading:\n if host_obj:\n if host_obj.created_at > upgrade.created_at:\n LOG.info(\"New host %s created after upgrade, allow partition\" %\n host_obj.hostname)\n return\n\n raise wsme.exc.ClientSideError(\n _(\"ERROR: Disk partition operations are not allowed during a \"\n \"software upgrade. 
Try again after the upgrade is completed.\"))", "def test_eks_worker_node_managed_by_eks(self) -> None:\n response = self.ec2.describe_instances(Filters=[\n {\n 'Name': 'tag:Name',\n 'Values': ['eks-prod']\n }\n ])\n worker_instances = response.get('Reservations')[0].get('Instances')\n self.assertEqual(1, len(worker_instances))", "def check_owned_instance(st, instance_id):\n\n logging.info(\"Checking owned instance %s...\" % instance_id)\n\n global api, owned_instances\n\n # Get information from API: we need the IP address\n inst = api.nova.get_instance(token_id=api.keystone.token_id, instance_id=instance_id)\n\n # Check if the instance is in the list (using cached status)\n found = False\n for h in st['workers_status'].keys():\n if gethostbyname(h) == inst.network_ip(network_name=cf[\"api\"][\"network_name\"]):\n found = True\n break\n\n # Deal with errors\n if not found:\n logging.error(\"Instance %s (with IP %s) has not joined the cluster after %ds: terminating it\" % (instance_id, inst.private_ip_address, cf['elastiq']['estimated_vm_deploy_time_s']))\n\n try:\n inst.terminate(token_id=api.keystone.token_id)\n owned_instances.remove(instance_id)\n save_owned_instances()\n logging.info(\"Forcing shutdown of %s: OK\" % instance_id)\n except Exception as e:\n # Recheck in a while (10s) in case termination fails\n logging.error(\"Forcing shutdown of %s failed: rescheduling check\" % instance_id)\n return {\n 'action': 'check_owned_instance',\n 'when': time.time() + 10,\n 'params': [ instance_id ]\n }\n\n else:\n logging.debug(\"Instance %s (with IP %s) successfully joined the cluster within %ds\" % (instance_id, inst.network_ip(network_name=cf[\"api\"][\"network_name\"]), cf['elastiq']['estimated_vm_deploy_time_s']))\n\n return", "def check_load(cursor):\n cursor.execute(\"\"\"\n select pid from pg_stat_activity where query ~* 'FETCH'\n and datname = 'asos'\"\"\")\n if cursor.rowcount > 9:\n sys.stderr.write((\"/cgi-bin/request/metars.py over capacity: %s\"\n ) % (cursor.rowcount,))\n ssw(\"Content-type: text/plain\\n\")\n ssw('Status: 503 Service Unavailable\\n\\n')\n ssw(\"ERROR: server over capacity, please try later\")\n sys.exit(0)", "def test_healthcheck_galera_cluster(host):\n\n sql_query = (\"show status where Variable_name like 'wsrep_clu%'\"\n \"or Variable_name like 'wsrep_local_state%';\")\n mysql_cmd = 'mysql -h localhost -e \"{0}\"'.format(sql_query)\n\n cmd = \"{} {}\".format(galera_container, mysql_cmd)\n\n output = host.run(cmd)\n verify_items = ['wsrep_cluster_conf_id',\n 'wsrep_cluster_size',\n 'wsrep_cluster_state_uuid',\n 'wsrep_cluster_status',\n 'wsrep_local_state_uuid']\n\n for item in verify_items:\n assert item in output.stdout", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n backend_type: Optional[pulumi.Input['InstanceBackendType']] = None,\n connection_name: Optional[pulumi.Input[str]] = None,\n current_disk_size: Optional[pulumi.Input[str]] = None,\n database_version: Optional[pulumi.Input['InstanceDatabaseVersion']] = None,\n disk_encryption_configuration: Optional[pulumi.Input[pulumi.InputType['DiskEncryptionConfigurationArgs']]] = None,\n disk_encryption_status: Optional[pulumi.Input[pulumi.InputType['DiskEncryptionStatusArgs']]] = None,\n etag: Optional[pulumi.Input[str]] = None,\n failover_replica: Optional[pulumi.Input[pulumi.InputType['InstanceFailoverReplicaArgs']]] = None,\n gce_zone: Optional[pulumi.Input[str]] = None,\n instance_type: Optional[pulumi.Input['InstanceInstanceType']] = None,\n 
ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpMappingArgs']]]]] = None,\n ipv6_address: Optional[pulumi.Input[str]] = None,\n kind: Optional[pulumi.Input[str]] = None,\n maintenance_version: Optional[pulumi.Input[str]] = None,\n master_instance_name: Optional[pulumi.Input[str]] = None,\n max_disk_size: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n on_premises_configuration: Optional[pulumi.Input[pulumi.InputType['OnPremisesConfigurationArgs']]] = None,\n out_of_disk_report: Optional[pulumi.Input[pulumi.InputType['SqlOutOfDiskReportArgs']]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n replica_configuration: Optional[pulumi.Input[pulumi.InputType['ReplicaConfigurationArgs']]] = None,\n replica_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n root_password: Optional[pulumi.Input[str]] = None,\n satisfies_pzs: Optional[pulumi.Input[bool]] = None,\n scheduled_maintenance: Optional[pulumi.Input[pulumi.InputType['SqlScheduledMaintenanceArgs']]] = None,\n secondary_gce_zone: Optional[pulumi.Input[str]] = None,\n self_link: Optional[pulumi.Input[str]] = None,\n server_ca_cert: Optional[pulumi.Input[pulumi.InputType['SslCertArgs']]] = None,\n service_account_email_address: Optional[pulumi.Input[str]] = None,\n settings: Optional[pulumi.Input[pulumi.InputType['SettingsArgs']]] = None,\n state: Optional[pulumi.Input['InstanceState']] = None,\n suspension_reason: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceSuspensionReasonItem']]]] = None,\n __props__=None):\n ...", "def quick_instance(self, name, image, instance_type, env_tag='dev', zone_tag='starwatts', os_tag='debian', sg_id=None,\n private=True, extra_sg_ids=None, extra_tags=None, terminate_on_shutdown=False,\n debug=False):\n # Debug setting\n if debug:\n logging.basicConfig(level=logging.DEBUG)\n\n # Preliminary tests\n try:\n ami = self.get_image(image_id=image)\n except EC2ResponseError:\n logging.error(\"The image {} could not be found. Aborting.\".format(image))\n return\n print(\"Using AMI {} : {}\".format(image, ami.name))\n if len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'running'})) > 0 or \\\n len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'stopped'})) > 0:\n logging.error(\"An instance with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No instance has the same 'name' tag.\")\n if self.keypair_exists(name):\n logging.error(\"A keypair with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No keypair was found with the same name.\")\n if sg_id is None:\n if self.security_group_exists(name=name):\n logging.error(\"A security group with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No security group was found with the same name.\")\n\n # Tags generation\n logging.debug(\"Generating tags to apply.\")\n tags = dict(name=name, os=os_tag, env=env_tag, zone=zone_tag, privacy='true' if private else 'false')\n if extra_tags is not None:\n tags.update(extra_tags)\n print(\"Tags : {}\".format(tags))\n\n # Fetching needed security groups (bastion and zabbix)\n standard_sg = self.get_all_security_groups(groupnames=['standard'])\n if len(standard_sg) != 1:\n logging.error(\"Multiple or no security group was found for the 'bastion' search. 
Aborting.\")\n return\n standard_sg = standard_sg[0]\n logging.debug(\"The following security group was found for 'standard : {} {}\".format(standard_sg.id,\n standard_sg.description))\n\n # Security group creation\n if sg_id is None:\n sg = self.create_security_group(name, \"SG applied to {} VM\".format(name))\n sg_id = sg.id\n\n sg_ids = [sg_id, standard_sg.id, ]\n # Using the extra security groups if any\n if extra_sg_ids is not None:\n logging.debug(\"Extra security groups to add : {}\".format(extra_sg_ids))\n sg_ids.extend(extra_sg_ids)\n logging.debug(\"Security Groups : {}\".format(sg_ids))\n\n user_data = \"-----BEGIN OUTSCALE SECTION-----\\nprivate_only=true\\n-----END OUTSCALE SECTION-----\" if private else \"\"\n logging.debug(\"Creating keypair.\")\n kp = self.create_key_pair(key_name=name)\n fp = os.path.join(os.path.expanduser('~/.ssh'), '%s.pem' % kp.name)\n with open(fp, 'wb') as fd:\n fd.write(bytes(kp.material, \"UTF-8\"))\n logging.debug(\"Keypair written to ~/.ssh/{}.pem\".format(name))\n\n resa = self.run_instances(image_id=image, key_name=name, security_groups=sg_ids, instance_type=instance_type,\n user_data=user_data,\n instance_initiated_shutdown_behavior='terminate' if terminate_on_shutdown else 'stop')\n inst = resa.instances[0]\n logging.debug(\"Adding tags to the newly created machine.\")\n inst.add_tags(tags)\n return inst", "def check(self, instance):\n\n def total_seconds(td):\n \"\"\"\n Returns total seconds of a timedelta in a way that's safe for\n Python < 2.7\n \"\"\"\n if hasattr(td, 'total_seconds'):\n return td.total_seconds()\n else:\n return (\n lag.microseconds +\n (lag.seconds + lag.days * 24 * 3600) * 10**6\n ) / 10.0**6\n\n if 'server' not in instance:\n raise Exception(\"Missing 'server' in mongo config\")\n\n # x.509 authentication\n ssl_params = {\n 'ssl': instance.get('ssl', None),\n 'ssl_keyfile': instance.get('ssl_keyfile', None),\n 'ssl_certfile': instance.get('ssl_certfile', None),\n 'ssl_cert_reqs': instance.get('ssl_cert_reqs', None),\n 'ssl_ca_certs': instance.get('ssl_ca_certs', None)\n }\n\n for key, param in ssl_params.items():\n if param is None:\n del ssl_params[key]\n\n server = instance['server']\n username, password, db_name, nodelist, clean_server_name, auth_source = self._parse_uri(server, sanitize_username=bool(ssl_params))\n additional_metrics = instance.get('additional_metrics', [])\n\n # Get the list of metrics to collect\n collect_tcmalloc_metrics = 'tcmalloc' in additional_metrics\n metrics_to_collect = self._get_metrics_to_collect(\n server,\n additional_metrics\n )\n\n # Tagging\n tags = instance.get('tags', [])\n # ...de-dupe tags to avoid a memory leak\n tags = list(set(tags))\n\n if not db_name:\n self.log.info('No MongoDB database found in URI. 
Defaulting to admin.')\n db_name = 'admin'\n\n service_check_tags = [\n \"db:%s\" % db_name\n ]\n service_check_tags.extend(tags)\n\n # ...add the `server` tag to the metrics' tags only\n # (it's added in the backend for service checks)\n tags.append('server:%s' % clean_server_name)\n\n if nodelist:\n host = nodelist[0][0]\n port = nodelist[0][1]\n service_check_tags = service_check_tags + [\n \"host:%s\" % host,\n \"port:%s\" % port\n ]\n\n timeout = float(instance.get('timeout', DEFAULT_TIMEOUT)) * 1000\n try:\n cli = pymongo.mongo_client.MongoClient(\n server,\n socketTimeoutMS=timeout,\n connectTimeoutMS=timeout,\n serverSelectionTimeoutMS=timeout,\n read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED,\n **ssl_params)\n # some commands can only go against the admin DB\n admindb = cli['admin']\n db = cli[db_name]\n except Exception:\n self.service_check(\n self.SERVICE_CHECK_NAME,\n AgentCheck.CRITICAL,\n tags=service_check_tags)\n raise\n\n # Authenticate\n do_auth = True\n use_x509 = ssl_params and not password\n\n if not username:\n self.log.debug(\n u\"A username is required to authenticate to `%s`\", server\n )\n do_auth = False\n\n if do_auth:\n if auth_source:\n self.log.info(\"authSource was specified in the the server URL: using '%s' as the authentication database\", auth_source)\n self._authenticate(cli[auth_source], username, password, use_x509, clean_server_name, service_check_tags)\n else:\n self._authenticate(db, username, password, use_x509, clean_server_name, service_check_tags)\n\n try:\n status = db.command('serverStatus', tcmalloc=collect_tcmalloc_metrics)\n except Exception:\n self.service_check(\n self.SERVICE_CHECK_NAME,\n AgentCheck.CRITICAL,\n tags=service_check_tags)\n raise\n else:\n self.service_check(\n self.SERVICE_CHECK_NAME,\n AgentCheck.OK,\n tags=service_check_tags)\n\n if status['ok'] == 0:\n raise Exception(status['errmsg'].__str__())\n\n ops = db.current_op()\n status['fsyncLocked'] = 1 if ops.get('fsyncLock') else 0\n\n status['stats'] = db.command('dbstats')\n dbstats = {}\n dbstats[db_name] = {'stats': status['stats']}\n\n # Handle replica data, if any\n # See\n # http://www.mongodb.org/display/DOCS/Replica+Set+Commands#ReplicaSetCommands-replSetGetStatus # noqa\n try:\n data = {}\n dbnames = []\n\n replSet = admindb.command('replSetGetStatus')\n if replSet:\n primary = None\n current = None\n\n # need a new connection to deal with replica sets\n setname = replSet.get('set')\n cli_rs = pymongo.mongo_client.MongoClient(\n server,\n socketTimeoutMS=timeout,\n connectTimeoutMS=timeout,\n serverSelectionTimeoutMS=timeout,\n replicaset=setname,\n read_preference=pymongo.ReadPreference.NEAREST,\n **ssl_params)\n\n if do_auth:\n if auth_source:\n self._authenticate(cli_rs[auth_source], username, password, use_x509, server, service_check_tags)\n else:\n self._authenticate(cli_rs[db_name], username, password, use_x509, server, service_check_tags)\n\n # Replication set information\n replset_name = replSet['set']\n replset_state = self.get_state_name(replSet['myState']).lower()\n\n tags.extend([\n u\"replset_name:{0}\".format(replset_name),\n u\"replset_state:{0}\".format(replset_state),\n ])\n\n # Find nodes: master and current node (ourself)\n for member in replSet.get('members'):\n if member.get('self'):\n current = member\n if int(member.get('state')) == 1:\n primary = member\n\n # Compute a lag time\n if current is not None and primary is not None:\n if 'optimeDate' in primary and 'optimeDate' in current:\n lag = primary['optimeDate'] - 
current['optimeDate']\n data['replicationLag'] = total_seconds(lag)\n\n if current is not None:\n data['health'] = current['health']\n\n data['state'] = replSet['myState']\n\n if current is not None:\n total = 0.0\n cfg = cli_rs['local']['system.replset'].find_one()\n for member in cfg.get('members'):\n total += member.get('votes', 1)\n if member['_id'] == current['_id']:\n data['votes'] = member.get('votes', 1)\n data['voteFraction'] = data['votes'] / total\n\n status['replSet'] = data\n\n # Submit events\n self._report_replica_set_state(\n data['state'], clean_server_name, replset_name, self.agentConfig\n )\n\n except Exception as e:\n if \"OperationFailure\" in repr(e) and \"not running with --replSet\" in str(e):\n pass\n else:\n raise e\n\n # If these keys exist, remove them for now as they cannot be serialized\n try:\n status['backgroundFlushing'].pop('last_finished')\n except KeyError:\n pass\n try:\n status.pop('localTime')\n except KeyError:\n pass\n\n dbnames = cli.database_names()\n self.gauge('mongodb.dbs', len(dbnames), tags=tags)\n\n for db_n in dbnames:\n db_aux = cli[db_n]\n dbstats[db_n] = {'stats': db_aux.command('dbstats')}\n\n # Go through the metrics and save the values\n for metric_name in metrics_to_collect:\n # each metric is of the form: x.y.z with z optional\n # and can be found at status[x][y][z]\n value = status\n\n if metric_name.startswith('stats'):\n continue\n else:\n try:\n for c in metric_name.split(\".\"):\n value = value[c]\n except KeyError:\n continue\n\n # value is now status[x][y][z]\n if not isinstance(value, (int, long, float)):\n raise TypeError(\n u\"{0} value is a {1}, it should be an int, a float or a long instead.\"\n .format(metric_name, type(value)))\n\n # Submit the metric\n submit_method, metric_name_alias = self._resolve_metric(metric_name, metrics_to_collect)\n submit_method(self, metric_name_alias, value, tags=tags)\n\n for st, value in dbstats.iteritems():\n for metric_name in metrics_to_collect:\n if not metric_name.startswith('stats.'):\n continue\n\n try:\n val = value['stats'][metric_name.split('.')[1]]\n except KeyError:\n continue\n\n # value is now status[x][y][z]\n if not isinstance(val, (int, long, float)):\n raise TypeError(\n u\"{0} value is a {1}, it should be an int, a float or a long instead.\"\n .format(metric_name, type(val))\n )\n\n # Submit the metric\n metrics_tags = (\n tags +\n [\n u\"cluster:db:{0}\".format(st), # FIXME 6.0 - keep for backward compatibility\n u\"db:{0}\".format(st),\n ]\n )\n\n submit_method, metric_name_alias = \\\n self._resolve_metric(metric_name, metrics_to_collect)\n submit_method(self, metric_name_alias, val, tags=metrics_tags)\n\n if _is_affirmative(instance.get('collections_indexes_stats')):\n mongo_version = cli.server_info().get('version', '0.0')\n if LooseVersion(mongo_version) >= LooseVersion(\"3.2\"):\n self._collect_indexes_stats(instance, db, tags)\n else:\n self.log.error(\"'collections_indexes_stats' is only available starting from mongo 3.2: your mongo version is %s\", mongo_version)\n\n # Report the usage metrics for dbs/collections\n if 'top' in additional_metrics:\n try:\n dbtop = db.command('top')\n for ns, ns_metrics in dbtop['totals'].iteritems():\n if \".\" not in ns:\n continue\n\n # configure tags for db name and collection name\n dbname, collname = ns.split(\".\", 1)\n ns_tags = tags + [\"db:%s\" % dbname, \"collection:%s\" % collname]\n\n # iterate over DBTOP metrics\n for m in self.TOP_METRICS:\n # each metric is of the form: x.y.z with z optional\n # and can be found 
at ns_metrics[x][y][z]\n value = ns_metrics\n try:\n for c in m.split(\".\"):\n value = value[c]\n except Exception:\n continue\n\n # value is now status[x][y][z]\n if not isinstance(value, (int, long, float)):\n raise TypeError(\n u\"{0} value is a {1}, it should be an int, a float or a long instead.\"\n .format(m, type(value))\n )\n\n # Submit the metric\n submit_method, metric_name_alias = \\\n self._resolve_metric(m, metrics_to_collect, prefix=\"usage\")\n submit_method(self, metric_name_alias, value, tags=ns_tags)\n except Exception as e:\n self.log.warning('Failed to record `top` metrics %s' % str(e))\n\n\n if 'local' in dbnames: # it might not be if we are connectiing through mongos\n # Fetch information analogous to Mongo's db.getReplicationInfo()\n localdb = cli['local']\n\n oplog_data = {}\n\n for ol_collection_name in (\"oplog.rs\", \"oplog.$main\"):\n ol_options = localdb[ol_collection_name].options()\n if ol_options:\n break\n\n if ol_options:\n try:\n oplog_data['logSizeMB'] = round(\n ol_options['size'] / 2.0 ** 20, 2\n )\n\n oplog = localdb[ol_collection_name]\n\n oplog_data['usedSizeMB'] = round(\n localdb.command(\"collstats\", ol_collection_name)['size'] / 2.0 ** 20, 2\n )\n\n op_asc_cursor = oplog.find({\"ts\": {\"$exists\": 1}}).sort(\"$natural\", pymongo.ASCENDING).limit(1)\n op_dsc_cursor = oplog.find({\"ts\": {\"$exists\": 1}}).sort(\"$natural\", pymongo.DESCENDING).limit(1)\n\n try:\n first_timestamp = op_asc_cursor[0]['ts'].as_datetime()\n last_timestamp = op_dsc_cursor[0]['ts'].as_datetime()\n oplog_data['timeDiff'] = total_seconds(last_timestamp - first_timestamp)\n except (IndexError, KeyError):\n # if the oplog collection doesn't have any entries\n # if an object in the collection doesn't have a ts value, we ignore it\n pass\n except KeyError:\n # encountered an error trying to access options.size for the oplog collection\n self.log.warning(u\"Failed to record `ReplicationInfo` metrics.\")\n\n for (m, value) in oplog_data.iteritems():\n submit_method, metric_name_alias = \\\n self._resolve_metric('oplog.%s' % m, metrics_to_collect)\n submit_method(self, metric_name_alias, value, tags=tags)\n\n else:\n self.log.debug('\"local\" database not in dbnames. 
Not collecting ReplicationInfo metrics')\n\n # get collection level stats\n try:\n # Ensure that you're on the right db\n db = cli[db_name]\n # grab the collections from the configutation\n coll_names = instance.get('collections', [])\n # loop through the collections\n for coll_name in coll_names:\n # grab the stats from the collection\n stats = db.command(\"collstats\", coll_name)\n # loop through the metrics\n for m in self.collection_metrics_names:\n coll_tags = tags + [\"db:%s\" % db_name, \"collection:%s\" % coll_name]\n value = stats.get(m, None)\n if not value:\n continue\n\n # if it's the index sizes, then it's a dict.\n if m == 'indexSizes':\n submit_method, metric_name_alias = \\\n self._resolve_metric('collection.%s' % m, self.COLLECTION_METRICS)\n # loop through the indexes\n for (idx, val) in value.iteritems():\n # we tag the index\n idx_tags = coll_tags + [\"index:%s\" % idx]\n submit_method(self, metric_name_alias, val, tags=idx_tags)\n else:\n submit_method, metric_name_alias = \\\n self._resolve_metric('collection.%s' % m, self.COLLECTION_METRICS)\n submit_method(self, metric_name_alias, value, tags=coll_tags)\n except Exception as e:\n self.log.warning(u\"Failed to record `collection` metrics.\")\n self.log.exception(e)", "def test_additive_instance_creation(self):\n\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import launch_instances, run_block_device_dict, farm_security_group_setup\n\n # launch_instances requires vpc setup as done by firesim/scripts/setup_firesim.py\n from awstools.aws_setup import aws_setup\n aws_setup()\n farm_security_group_setup()\n\n type = 'f1.2xlarge'\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n always_expand=True)\n instances.should.have.length_of(1)\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n always_expand=True)\n instances.should.have.length_of(1)\n\n # There should be two instances total now, across two reservations\n ec2_client = boto3.client('ec2')\n\n paginator = ec2_client.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n all_reservations.should.have.length_of(2)\n\n [i for r in all_reservations for i in r['Instances']].should.have.length_of(2)", "def reconfiguration_scalability(self):\n\n self.check_run('reconfiguration_scalability')\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"reconfigure_nova_ephemeral_disk\")\n\n self.show_step(2)\n cluster_id = self.fuel_web.get_last_created_cluster()\n config = utils.get_config_template('nova_disk')\n structured_config_nova = get_structured_config_dict(config)\n config = utils.get_config_template('keystone')\n structured_config_keystone = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n cluster_id,\n role='controller')\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n\n self.show_step(3)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role='controller')\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(4)\n 
self.check_config_on_remote(controllers, structured_config_keystone)\n\n self.show_step(5)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n time_expiration = config[\n 'keystone_config']['token/expiration']['value']\n self.check_token_expiration(os_conn, time_expiration)\n\n self.show_step(6)\n bs_nodes = [x for x in self.env.d_env.get_nodes()\n if x.name == 'slave-05' or x.name == 'slave-06']\n self.env.bootstrap_nodes(bs_nodes)\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-05': ['compute', 'cinder']})\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-06': ['controller']})\n\n self.show_step(7)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(8)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(9)\n self.fuel_web.run_ostf(cluster_id=cluster_id)\n\n self.show_step(10)\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['compute'])\n target_controller = [x for x in controllers\n if 'slave-06' in x['name']]\n target_compute = [x for x in computes\n if 'slave-05' in x['name']]\n self.check_config_on_remote(target_controller,\n structured_config_keystone)\n\n self.show_step(11)\n self.check_config_on_remote(target_compute, structured_config_nova)\n\n self.show_step(12)\n self.show_step(13)\n self.show_step(14)\n self.show_step(15)\n self.show_step(16)\n\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n hypervisor_name = target_compute[0]['fqdn']\n self.check_nova_ephemeral_disk(os_conn, cluster_id,\n hypervisor_name=hypervisor_name)\n\n self.show_step(17)\n self.check_token_expiration(os_conn, time_expiration)\n\n self.env.make_snapshot(\"reconfiguration_scalability\", is_make=True)", "def scale(options):\n\n # ONLY GCE is supported for scaling at this time\n cluster = gce_cluster_control(options)\n if options.test_k8s:\n k8s = k8s_control_test(options)\n else:\n k8s = k8s_control(options)\n\n slack_logger.addHandler(slack_handler(options.slack_token))\n if not options.slack_token:\n scale_logger.info(\n \"No message will be sent to slack, since there is no token provided\")\n\n scale_logger.info(\"Scaling on cluster %s\", k8s.get_cluster_name())\n\n nodes = [] # a list of nodes that are NOT critical\n for node in k8s.nodes:\n if node.metadata.name not in k8s.critical_node_names:\n nodes.append(node)\n\n # Shuffle the node list so that when there are multiple nodes\n # with same number of pods, they will be randomly picked to\n # be made unschedulable\n random.shuffle(nodes)\n\n # goal is the total number of nodes we want in the cluster\n goal = schedule_goal(k8s, options)\n\n scale_logger.info(\"Total nodes in the cluster: %i\", len(k8s.nodes))\n scale_logger.info(\n \"%i nodes are unschedulable at this time\", k8s.get_num_schedulable())\n scale_logger.info(\"Found %i critical nodes\",\n len(k8s.nodes) - len(nodes))\n scale_logger.info(\"Recommending total %i nodes for service\", goal)\n\n if confirm((\"Updating unschedulable flags to ensure %i nodes are unschedulable\" % max(len(k8s.nodes) - goal, 0))):\n update_unschedulable(max(len(k8s.nodes) - goal, 0), nodes, k8s)\n\n if goal > len(k8s.nodes):\n scale_logger.info(\n \"Resize the cluster to %i nodes to satisfy the demand\", goal)\n if options.test_cloud:\n resize_for_new_nodes_test(goal, k8s, cluster)\n else:\n slack_logger.info(\n \"Cluster resized to %i nodes to satisfy the demand\", goal)\n 
resize_for_new_nodes(goal, k8s, cluster)\n if options.test_cloud:\n shutdown_empty_nodes_test(nodes, k8s, cluster)\n else:\n # CRITICAL NODES SHOULD NOT BE SHUTDOWN\n shutdown_empty_nodes(nodes, k8s, cluster)", "def check_if_cluster_was_upgraded():\n return True if \"replaces\" in get_ocs_csv().get().get(\"spec\") else False", "def dvs_instances_one_group(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(2)\n self.show_step(3)\n openstack.create_instances(os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n vm_count=vm_count,\n security_groups=[security_group.name])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(4)\n srv_list = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, srv_list)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair)\n\n self.show_step(5)\n for srv in srv_list:\n os_conn.nova.servers.delete(srv)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(srv)", "def test_list_cluster_resource_quota(self):\n pass", "def modify_rds_security_group(payload):\n # version = payload.get(\"version\")\n rds_ids = payload.pop(\"resource_id\")\n sg_ids = payload.pop(\"sg_id\")\n apply_action = \"GrantSecurityGroup\"\n remove_action = \"RemoveSecurityGroup\"\n check_instance_security_action = \"DescribeSecurityGroupByInstance\"\n version = payload.get(\"version\")\n result_data = {}\n\n succ, resp = get_ha_rds_backend_instance_info(payload)\n if not succ:\n return resp\n rds_2_instance = {rds: instance for instance, rds in resp.items()}\n\n if len(sg_ids) > 1:\n return console_response(\n SecurityErrorCode.ONE_SECURITY_PER_INSTANCE_ERROR, \"modify failed\")\n sg_id = sg_ids[0]\n\n code = 0\n msg = 'Success'\n for rds_id in rds_ids:\n sg_results_succ = []\n sg = RdsSecurityGroupModel.get_security_by_id(sg_id=sg_id)\n sg_uuid = sg.uuid\n visible_rds_record = RdsModel.get_rds_by_id(rds_id=rds_id)\n if visible_rds_record.rds_type == 'ha':\n rds_group = visible_rds_record.rds_group\n rds_records = RdsModel.get_rds_records_by_group(rds_group)\n else:\n rds_records = []\n for rds_record in rds_records:\n rds_ins_uuid = rds_2_instance.get(rds_record.uuid)\n\n payload.update(\n {\"action\": check_instance_security_action, \"version\": version,\n \"server\": rds_ins_uuid})\n # check_resp = api.get(payload=payload, timeout=10)\n check_resp = api.get(payload=payload)\n if check_resp.get(\"code\") != 0:\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = check_resp.get(\"msg\")\n continue\n\n # if the instance already has a security group, remove it\n if check_resp[\"data\"][\"total_count\"] > 0:\n old_sg_uuid = check_resp[\"data\"][\"ret_set\"][0][\"id\"]\n payload.update({\"action\": remove_action, \"version\": version,\n \"server\": rds_ins_uuid,\n 
\"security_group\": old_sg_uuid})\n # remove_resp = api.get(payload=payload, timeout=10)\n remove_resp = api.get(payload=payload)\n if remove_resp.get(\"code\") != 0:\n logger.debug(\"the resp of removing the old securty group is:\" +\n str(remove_resp))\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = remove_resp.get(\"msg\")\n continue\n else:\n rds_record.sg = None\n rds_record.save()\n\n # grant the new security group to the instance\n payload.update({\"action\": apply_action, \"version\": version,\n \"server\": rds_ins_uuid, \"security_group\": sg_uuid})\n # grant_resp = api.get(payload=payload, timeout=10)\n grant_resp = api.get(payload=payload)\n\n if grant_resp.get(\"code\") != 0:\n logger.debug(\"the resp of granting the new securty group is:\" +\n str(grant_resp))\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = grant_resp.get(\"msg\")\n logger.error(\n \"security_group with sg_id \" + sg_id +\n \" cannot apply to rds with rds_id \" + rds_id)\n else:\n try:\n rds_record.sg = RdsSecurityGroupModel.\\\n get_security_by_id(sg_id)\n rds_record.save()\n except Exception as exp:\n logger.error(\"cannot save grant sg to rds to db, {}\".\n format(exp.message))\n else:\n sg_results_succ.append(sg_id)\n result_data.update({rds_id: sg_results_succ})\n resp = console_response(code, msg, len(result_data.keys()), [result_data])\n return resp", "def test_instance_too_small_aws():\n args = argparse.Namespace(cfg=os.path.join(TEST_DATA_DIR, 'instance-too-small-aws.ini'))\n with pytest.raises(UserReportError) as err:\n cfg = ElasticBlastConfig(configure(args), task = ElbCommand.SUBMIT)\n cfg.validate()\n assert err.value.returncode == INPUT_ERROR\n print(err.value.message)\n assert 'does not have enough memory' in err.value.message", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n availability_zone: Optional[pulumi.Input[str]] = None,\n create_sample_data: Optional[pulumi.Input[bool]] = None,\n db_instance_category: Optional[pulumi.Input[str]] = None,\n db_instance_class: Optional[pulumi.Input[str]] = None,\n db_instance_mode: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n encryption_key: Optional[pulumi.Input[str]] = None,\n encryption_type: Optional[pulumi.Input[str]] = None,\n engine: Optional[pulumi.Input[str]] = None,\n engine_version: Optional[pulumi.Input[str]] = None,\n instance_charge_type: Optional[pulumi.Input[str]] = None,\n instance_group_count: Optional[pulumi.Input[int]] = None,\n instance_network_type: Optional[pulumi.Input[str]] = None,\n instance_spec: Optional[pulumi.Input[str]] = None,\n ip_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InstanceIpWhitelistArgs']]]]] = None,\n maintain_end_time: Optional[pulumi.Input[str]] = None,\n maintain_start_time: Optional[pulumi.Input[str]] = None,\n master_node_num: Optional[pulumi.Input[int]] = None,\n payment_type: Optional[pulumi.Input[str]] = None,\n period: Optional[pulumi.Input[str]] = None,\n private_ip_address: Optional[pulumi.Input[str]] = None,\n resource_group_id: Optional[pulumi.Input[str]] = None,\n security_ip_lists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n seg_node_num: Optional[pulumi.Input[int]] = None,\n seg_storage_type: Optional[pulumi.Input[str]] = None,\n ssl_enabled: Optional[pulumi.Input[int]] = None,\n storage_size: Optional[pulumi.Input[int]] = None,\n tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n used_time: Optional[pulumi.Input[str]] = None,\n 
vector_configuration_status: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n vswitch_id: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def reserved_compare(options):\n running_instances = defaultdict(dict)\n reserved_purchases = defaultdict(dict)\n regions = boto.ec2.regions()\n good_regions = [r for r in regions if r.name not in ['us-gov-west-1',\n 'cn-north-1']]\n for region in good_regions:\n if options.trace:\n print \" Scanning region {0}\".format(region.name)\n conn = region.connect()\n filters = {'instance-state-name': 'running'}\n zones = defaultdict(dict)\n\n if options.trace:\n print \" Fetching running instances\"\n reservations = conn.get_all_instances(filters=filters)\n for reservation in reservations:\n for inst in reservation.instances:\n if options.debug:\n print instance_string(inst, options, verbose=True)\n if inst.state != 'running':\n if options.debug:\n print \"Skip {0.id} state {0.state}\".format(inst)\n continue\n if inst.spot_instance_request_id:\n if options.debug:\n print \"Skip {0.id} has spot id {0.spot_instance_request_id}\".format(inst)\n continue\n if 'aws:autoscaling:groupName' in inst.tags:\n if options.debug:\n print \"Skip {0.id} is an autoscale instance\".format(inst)\n continue\n if inst.platform == 'Windows' or inst.platform == 'windows':\n if options.debug:\n print \"Skip {0.id} has platform {0.platform}\".format(inst)\n continue\n if inst.instance_type not in zones[inst.placement]:\n zones[inst.placement][inst.instance_type] = []\n zones[inst.placement][inst.instance_type].append(inst)\n\n if zones:\n running_instances[region.name] = zones\n\n purchased = defaultdict(dict)\n if options.trace:\n print \" Fetching reservations\"\n\n reserved = conn.get_all_reserved_instances()\n for r in reserved:\n if options.debug:\n print reservation_string(r, verbose=True)\n if r.state != 'active':\n continue\n if r.instance_tenancy != 'default':\n print 'WARNING: Non-default tenancy %s: %s' % (r.instance_tenancy, reservation_string(r))\n continue\n if r.instance_type not in purchased[r.availability_zone]:\n purchased[r.availability_zone][r.instance_type] = [r]\n else:\n purchased[r.availability_zone][r.instance_type].append(r)\n\n if purchased:\n reserved_purchases[region.name] = purchased\n\n return check_reservation_use(options, running_instances,\n reserved_purchases)", "def check_available():\n\n rm = current_app.config['rm_object']\n\n return rm.check_availability()", "def xtest_instance_api_negative(self):\n\n # Test creating a db instance.\n LOG.info(\"* Creating db instance\")\n body = r\"\"\"\n {\"instance\": {\n \"name\": \"dbapi_test\",\n \"flavorRef\": \"medium\",\n \"port\": \"3306\",\n \"dbtype\": {\n \"name\": \"mysql\",\n \"version\": \"5.5\"\n }\n }\n }\"\"\"\n\n req = httplib2.Http(\".cache\")\n resp, content = req.request(API_URL + \"instances\", \"POST\", body, AUTH_HEADER)\n LOG.debug(content)\n content = json.loads(content)\n LOG.debug(resp)\n LOG.debug(content)\n\n self.instance_id = content['instance']['id']\n LOG.debug(\"Instance ID: %s\" % self.instance_id)\n\n # Assert 1) that the request was accepted and 2) that the response\n # is in the expected format.\n self.assertEqual(201, resp.status)\n self.assertTrue(content.has_key('instance'))\n\n\n # Test creating an instance without a body in the request.\n LOG.info(\"* Creating an instance without a body\")\n resp, content = req.request(API_URL + \"instances\", \"POST\", \"\", 
AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n\n # Test creating an instance with a malformed body.\n LOG.info(\"* Creating an instance with a malformed body\")\n resp, content = req.request(API_URL + \"instances\", \"POST\", r\"\"\"{\"instance\": {}}\"\"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(500, resp.status)\n \n # Test listing all db instances with a body in the request.\n LOG.info(\"* Listing all db instances with a body\")\n resp, content = req.request(API_URL + \"instances\", \"GET\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n \n # Test getting a specific db instance with a body in the request.\n LOG.info(\"* Getting instance %s with a body in the request\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id, \"GET\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n\n # Test getting a non-existent db instance.\n LOG.info(\"* Getting dummy instance\")\n resp, content = req.request(API_URL + \"instances/dummy\", \"GET\", \"\", AUTH_HEADER)\n content = json.loads(content)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n\n # Test immediately resetting the password on a db instance with a body in the request.\n LOG.info(\"* Resetting password on instance %s with a body in the request\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id + \"/resetpassword\", \"POST\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n \n\n # Test resetting the password on a db instance for a non-existent instance\n LOG.info(\"* Resetting password on dummy instance\")\n resp, content = req.request(API_URL + \"instances/dummy/resetpassword\", \"POST\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n \n # Test restarting a db instance for a non-existent instance\n LOG.info(\"* Restarting dummy instance\")\n resp, content = req.request(API_URL + \"instances/dummy/restart\", \"POST\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n\n # Test immediately restarting a db instance with a body in the request.\n LOG.info(\"* Restarting instance %s\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id + \"/restart\", \"POST\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n\n # Test deleting an instance with a body in the request.\n LOG.info(\"* Testing delete of instance %s with a body in the request\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id, \"DELETE\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n # Test that trying to delete an already deleted instance returns\n # the proper error code.\n LOG.info(\"* Testing re-delete of instance %s\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id, \"DELETE\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)", "def create(self):\n raise WufooException(\"InstanceResource creation not supported\")", "def reconfigure_nova_quota(self):\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"basic_env_for_reconfiguration\")\n\n cluster_id = 
self.fuel_web.get_last_created_cluster()\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n\n self.show_step(2)\n config = utils.get_config_template('nova_quota')\n structured_config = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n cluster_id,\n role=\"controller\")\n\n self.show_step(3)\n uptimes = self.get_service_uptime(controllers, 'nova-api')\n\n self.show_step(4)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role=\"controller\")\n\n self.show_step(5)\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(6)\n self.check_service_was_restarted(controllers, uptimes, 'nova-api')\n\n self.show_step(7)\n self.check_config_on_remote(controllers, structured_config)\n\n self.show_step(8)\n self.show_step(9)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n\n self.check_nova_quota(os_conn, cluster_id)\n\n self.env.make_snapshot(\"reconfigure_nova_quota\")", "def create_cluster(rs):\n\n rs.create_cluster(verbose=False)\n print('Creating cluster. Will check every 30 seconds for completed creation.')\n cluster_built = False\n while not cluster_built:\n print('Sleeping 30 seconds.')\n time.sleep(30)\n cluster_built = check_available(rs)", "def check_availability(self):\n pass", "def replace_with_insufficient_replicas_test(self):\n debug(\"Starting cluster with 3 nodes.\")\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n if DISABLE_VNODES:\n num_tokens = 1\n else:\n # a little hacky but grep_log returns the whole line...\n num_tokens = int(node3.get_conf_option('num_tokens'))\n\n debug(\"testing with num_tokens: {}\".format(num_tokens))\n\n debug(\"Inserting Data...\")\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=2)'])\n\n # stop node to replace\n debug(\"Stopping node to replace.\")\n node3.stop(wait_other_notice=True)\n\n # stop other replica\n debug(\"Stopping node2 (other replica)\")\n node2.stop(wait_other_notice=True)\n\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n cluster.add(node4, False)\n node4.start(replace_address='127.0.0.3', wait_for_binary_proto=False, wait_other_notice=False)\n\n # replace should fail due to insufficient replicas\n node4.watch_log_for(\"Unable to find sufficient sources for streaming range\")\n assert_not_running(node4)", "def check_elb_instance_health(elb_name, instances):\n if_verbose(\"Checking ELB %s instance health for %s\" % (elb_name, instances))\n timer = time.time()\n while (True):\n if_verbose(\"Sleeping for %d ELB instance health\" % args.update_timeout)\n time.sleep(args.update_timeout)\n\n if int(time.time() - timer) >= args.health_check_timeout:\n return \"Health check timer expired. 
A manual clean up is likely.\"\n\n healthy_elb_instances = 0\n elb_instances = elb.describe_instance_health(LoadBalancerName=elb_name, Instances=instances)\n for instance in elb_instances[\"InstanceStates\"]:\n if_verbose(\"Progress of ELB instance %s: %s\" % (instance[\"InstanceId\"], instance[\"State\"]))\n\n if instance[\"State\"] == \"InService\":\n healthy_elb_instances += 1\n\n if healthy_elb_instances == len(instances):\n break\n else:\n healthy_elb_instances = 0\n\n if_verbose(\"ELB %s is healthy with instances %s\" % (elb_name, elb_instances))\n return None", "def scale_up_autoscaling_group(asg_name, instance_count):\n if_verbose(\"Scaling up ASG %s to %d instances\" % (asg_name, instance_count))\n asg.set_desired_capacity(AutoScalingGroupName=asg_name, DesiredCapacity=instance_count)\n \n activities = []\n timer = time.time()\n while(True):\n if_verbose(\"Sleeping for %d seconds whilst waiting for activities to come active\" % args.update_timeout)\n time.sleep(args.update_timeout)\n\n if int(time.time() - timer) >= args.health_check_timeout:\n return \"Health check timer expired on activities listing. A manual clean up is likely.\"\n\n activities = asg.describe_scaling_activities(AutoScalingGroupName=asg_name, MaxRecords=args.instance_count_step) \n \n if len(activities[\"Activities\"]) == args.instance_count_step:\n break\n\n activity_ids = [a[\"ActivityId\"] for a in activities[\"Activities\"]]\n\n if not len(activity_ids) > 0:\n return \"No activities found\" \n \n if_verbose(\"Activities found, checking them until complete or %ds timer expires\" % args.health_check_timeout)\n timer = time.time()\n while(True):\n if_verbose(\"Sleeping for %d seconds whilst waiting for activities to complete\" % args.update_timeout)\n time.sleep(args.update_timeout)\n \n if int(time.time() - timer) >= args.health_check_timeout:\n return \"Health check timer expired on activities check. 
A manual clean up is likely.\"\n\n completed_activities = 0\n activity_statuses = asg.describe_scaling_activities(ActivityIds=activity_ids, AutoScalingGroupName=asg_name, MaxRecords=args.instance_count_step)\n for activity in activity_statuses[\"Activities\"]:\n if_verbose(\"Progress of activity ID %s: %d\" % (activity[\"ActivityId\"], activity[\"Progress\"]))\n\n if activity[\"Progress\"] == 100:\n completed_activities += 1\n\n if completed_activities >= args.instance_count_step:\n break\n else:\n completed_activities = 0\n\n if_verbose(\"Scaling up of ASG %s successful\" % asg_name)\n return None", "def test_create_instance_with_oversubscribed_ram_fail(self):\n self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)\n self.rt.update_available_resource(self.context.elevated(), NODENAME)\n\n # get total memory as reported by virt driver:\n resources = self.compute.driver.get_available_resource(NODENAME)\n total_mem_mb = resources['memory_mb']\n\n oversub_limit_mb = total_mem_mb * 1.5\n instance_mb = int(total_mem_mb * 1.55)\n\n # build an instance, specifying an amount of memory that exceeds\n # both total_mem_mb and the oversubscribed limit:\n params = {\"flavor\": {\"memory_mb\": instance_mb, \"root_gb\": 128,\n \"ephemeral_gb\": 128}}\n instance = self._create_fake_instance_obj(params)\n\n filter_properties = {'limits': {'memory_mb': oversub_limit_mb}}\n\n self.compute.build_and_run_instance(self.context, instance,\n {}, {}, filter_properties, [],\n block_device_mapping=[])", "def wait_for_instances(client, asg, desired_state=None, desired_health=None,\n desired_count=None):\n for i in range(61):\n if i == 60:\n raise Exception('Tried for 5 minutes, giving up.')\n sleep(10)\n _asg = client.describe_auto_scaling_groups(\n AutoScalingGroupNames=[asg['AutoScalingGroupName']],\n )['AutoScalingGroups'][0]\n\n if(\n desired_count is not None and\n len(_asg['Instances']) < desired_count\n ):\n continue\n\n # Check instance states\n all_matching = True\n for instance in _asg['Instances']:\n if(\n desired_state is not None and\n instance['LifecycleState'] != desired_state\n ):\n all_matching = False\n break\n if(\n desired_health is not None and\n instance['HealthStatus'] != desired_health\n ):\n all_matching = False\n break\n if all_matching:\n break", "def is_asg_scaled(asg_name, desired_capacity):\n logger.info('Checking asg {} instance count...'.format(asg_name))\n response = client.describe_auto_scaling_groups(\n AutoScalingGroupNames=[asg_name], MaxRecords=1\n )\n actual_instances = response['AutoScalingGroups'][0]['Instances']\n if len(actual_instances) != desired_capacity:\n logger.info('Asg {} does not have enough running instances to proceed'.format(asg_name))\n logger.info('Actual instances: {} Desired instances: {}'.format(\n len(actual_instances),\n desired_capacity)\n )\n is_scaled = False\n else:\n logger.info('Asg {} scaled OK'.format(asg_name))\n logger.info('Actual instances: {} Desired instances: {}'.format(\n len(actual_instances),\n desired_capacity)\n )\n is_scaled = True\n return is_scaled", "def test_live_migration_src_check_compute_node_not_alive(self):\n instance_id = self._create_instance()\n i_ref = db.instance_get(self.context, instance_id)\n t = utils.utcnow() - datetime.timedelta(10)\n s_ref = self._create_compute_service(created_at=t, updated_at=t,\n host=i_ref['host'])\n\n self.assertRaises(exception.ComputeServiceUnavailable,\n self.scheduler.driver._live_migration_src_check,\n self.context, i_ref)\n\n db.instance_destroy(self.context, instance_id)\n 
db.service_destroy(self.context, s_ref['id'])", "def check_pacemaker_resource(self, resource_name, role, is_ha=True):\n n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n self.cluster_id, [role])\n d_ctrls = self.fuel_web.get_devops_nodes_by_nailgun_nodes(n_ctrls)\n pcm_nodes = ' '.join(self.fuel_web.get_pcm_nodes(\n d_ctrls[0].name, pure=True)['Online'])\n logger.info(\"pacemaker nodes are {0}\".format(pcm_nodes))\n resource_nodes = self.fuel_web.get_pacemaker_resource_location(\n d_ctrls[0].name, \"{}\".format(resource_name))\n if is_ha:\n for resource_node in resource_nodes:\n logger.info(\"Check resource [{0}] on node {1}\".format(\n resource_name, resource_node.name))\n config = self.fuel_web.get_pacemaker_config(resource_node.name)\n asserts.assert_not_equal(\n re.search(\n \"Clone Set: clone_{0} \\[{0}\\]\\s+Started: \\[ {1} \\]\".\n format(resource_name, pcm_nodes), config), None,\n 'Resource [{0}] is not properly configured'.format(\n resource_name))\n else:\n asserts.assert_true(len(resource_nodes), 1)\n config = self.fuel_web.get_pacemaker_config(resource_nodes[0].name)\n logger.info(\"Check resource [{0}] on node {1}\".format(\n resource_name, resource_nodes[0].name))\n asserts.assert_not_equal(\n re.search(\"{0}\\s+\\(ocf::fuel:{1}\\):\\s+Started\".format(\n resource_name, resource_name.split(\"_\")[1]), config), None,\n 'Resource [{0}] is not properly configured'.format(\n resource_name))", "def check_service_replication(\n instance_config,\n all_tasks,\n smartstack_replication_checker,\n):\n expected_count = instance_config.get_instances()\n log.info(\"Expecting %d total tasks for %s\" % (expected_count, instance_config.job_id))\n proxy_port = marathon_tools.get_proxy_port_for_instance(\n name=instance_config.service,\n instance=instance_config.instance,\n cluster=instance_config.cluster,\n soa_dir=instance_config.soa_dir,\n )\n\n registrations = instance_config.get_registrations()\n # if the primary registration does not match the service_instance name then\n # the best we can do is check marathon for replication (for now).\n if proxy_port is not None and registrations[0] == instance_config.job_id:\n check_smartstack_replication_for_instance(\n instance_config=instance_config,\n expected_count=expected_count,\n smartstack_replication_checker=smartstack_replication_checker,\n )\n else:\n check_healthy_marathon_tasks_for_service_instance(\n instance_config=instance_config,\n expected_count=expected_count,\n all_tasks=all_tasks,\n )", "def health_ok(self):\n for client in self.clients():\n if client.run_cmd('ls'):\n log.info('Vmware cluster is up.')\n return True\n else:\n return False", "def _estimate_elasticsearch_requirement(\n instance: Instance,\n desires: CapacityDesires,\n working_set: float,\n reads_per_second: float,\n max_rps_to_disk: int,\n zones_per_region: int = 3,\n copies_per_region: int = 3,\n) -> CapacityRequirement:\n # Keep half of the cores free for background work (merging mostly)\n needed_cores = math.ceil(sqrt_staffed_cores(desires) * 1.5)\n # Keep half of the bandwidth available for backup\n needed_network_mbps = simple_network_mbps(desires) * 2\n\n needed_disk = math.ceil(\n (1.0 / desires.data_shape.estimated_compression_ratio.mid)\n * desires.data_shape.estimated_state_size_gib.mid\n * copies_per_region,\n )\n\n # Rough estimate of how many instances we would need just for the the CPU\n # Note that this is a lower bound, we might end up with more.\n needed_cores = math.ceil(\n max(1, needed_cores // (instance.cpu_ghz / 
desires.core_reference_ghz))\n )\n rough_count = math.ceil(needed_cores / instance.cpu)\n\n # Generally speaking we want fewer than some number of reads per second\n # hitting disk per instance. If we don't have many reads we don't need to\n # hold much data in memory.\n instance_rps = max(1, reads_per_second // rough_count)\n disk_rps = instance_rps * _es_io_per_read(max(1, needed_disk // rough_count))\n rps_working_set = min(1.0, disk_rps / max_rps_to_disk)\n\n # If disk RPS will be smaller than our target because there are no\n # reads, we don't need to hold as much data in memory\n needed_memory = min(working_set, rps_working_set) * needed_disk\n\n # Now convert to per zone\n needed_cores = needed_cores // zones_per_region\n needed_disk = needed_disk // zones_per_region\n needed_memory = int(needed_memory // zones_per_region)\n logger.debug(\n \"Need (cpu, mem, disk, working) = (%s, %s, %s, %f)\",\n needed_cores,\n needed_memory,\n needed_disk,\n working_set,\n )\n\n return CapacityRequirement(\n requirement_type=\"elasticsearch-data-zonal\",\n core_reference_ghz=desires.core_reference_ghz,\n cpu_cores=certain_int(needed_cores),\n mem_gib=certain_float(needed_memory),\n disk_gib=certain_float(needed_disk),\n network_mbps=certain_float(needed_network_mbps),\n context={\n \"working_set\": min(working_set, rps_working_set),\n \"rps_working_set\": rps_working_set,\n \"disk_slo_working_set\": working_set,\n \"replication_factor\": copies_per_region,\n \"compression_ratio\": round(\n 1.0 / desires.data_shape.estimated_compression_ratio.mid, 2\n ),\n \"read_per_second\": reads_per_second,\n },\n )", "def test_04_verify_upgraded_ipv6_network_redundant(self):\n\n self.createIpv4NetworkOffering(True)\n self.createIpv6NetworkOfferingForUpdate(True)\n self.createTinyServiceOffering()\n self.prepareRoutingTestResourcesInBackground()\n self.deployNetwork()\n self.deployNetworkVm()\n self.updateNetworkWithOffering()\n self.checkIpv6Network()\n self.checkIpv6NetworkVm()\n self.restartNetworkWithCleanup()\n self.checkIpv6Network()\n self.checkIpv6NetworkRouting()\n self.checkIpv6FirewallRule()\n self.checkNetworkVRRedundancy()", "def waitForInstanceToRun(instance):\n while True:\n try:\n instance.update()\n break\n except EC2ResponseError:\n continue\n\n for trial in range(0, NUM_RETRY_ATTEMPTS):\n if instance.update() == u'running':\n break\n elif trial == NUM_RETRY_ATTEMPTS-1:\n raise RuntimeError(\"AWS instance failed to startup after %d \" \\\n \"re-checks\" % NUM_RETRY_ATTEMPTS)\n else:\n time.sleep(1)", "def test_upgrade_plan_all_fine(setup, skuba):\n\n setup_kubernetes_version(skuba)\n out = skuba.cluster_upgrade_plan()\n\n assert out.find(\n \"Congratulations! 
You are already at the latest version available\"\n ) != -1", "def __run_instances(self, number=1, policies={}):\n try:\n self.euca = Euca2ool('k:n:t:g:d:f:z:',\n ['key=', 'kernel=', 'ramdisk=', 'instance-count=', 'instance-type=',\n 'group=', 'user-data=', 'user-data-file=', 'addressing=', 'availability-zone='])\n except Exception, ex:\n sys.exit(1)\n\n instance_type = policies.get('instance_type') or 'm1.small'\n image_id = policies.get('image_id') or self.__get_image_id()[0]\n min_count = number\n max_count = number\n keyname = 'mykey'\n \n kernel_id = None\n ramdisk_id = None\n group_names = []\n user_data = None\n addressing_type = None\n zone = None\n user_data = None\n \n if image_id:\n euca_conn = self.__make_connection()\n try:\n reservation = euca_conn.run_instances(image_id = image_id,\n min_count = min_count,\n max_count = max_count,\n key_name = keyname,\n security_groups = group_names,\n user_data = user_data,\n addressing_type = addressing_type,\n instance_type = instance_type,\n placement = zone,\n kernel_id = kernel_id,\n ramdisk_id = ramdisk_id)\n except Exception, ex:\n self.euca.display_error_and_exit('error:%s' % ex)\n return reservation\n return False", "def test_least_busy_host_gets_instance(self):\n s_ref = self._create_compute_service(host='host1')\n s_ref2 = self._create_compute_service(host='host2')\n instance_id1 = self._create_instance(host='host1')\n\n instance_id2 = self._create_instance()\n host = self.scheduler.driver.schedule_run_instance(self.context,\n instance_id2)\n self.assertEqual(host, 'host2')\n db.instance_destroy(self.context, instance_id2)\n db.instance_destroy(self.context, instance_id1)\n db.service_destroy(self.context, s_ref['id'])\n db.service_destroy(self.context, s_ref2['id'])", "def run_instance():\n ami_id = \"ami-04876f29fd3a5e8ba\" # AMI Id\n instance_type = \"t2.micro\" # Instance Type\n tag_specs = [\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': 'Name',\n 'Value': 'BoB10@ProductDev.sdk.8084'\n }\n ]\n }\n ]\n\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Run an instance\n\n instances = ec2_resource.create_instances(ImageId=ami_id, InstanceType=instance_type,\n MaxCount=1, MinCount=1, KeyName='BoB10@ProductDev.8084',\n TagSpecifications=tag_specs, \n SecurityGroupIds=['sg-06b757b4bb272d98f'],\n UserData=assemble_userdata().as_string())\n Instance_id = instances[0].id\n print('\\nInstance Id: ' + Instance_id)\n print('Image Id: ' + instances[0].image_id)\n print('Instance Type: ' + instances[0].instance_type)\n print('State: ' + instances[0].state['Name'])\n return Instance_id", "def test_instance_running(self) -> None:\n if self.prod_env:\n ec2_name = 'saints-xctf-server-prod-asg'\n else:\n ec2_name = 'saints-xctf-server-dev-asg'\n\n instances = self.get_ec2(ec2_name)\n self.assertTrue(len(instances) > 0)", "def tcp_ping_nodes(self, timeout=20.0):\n for node in self.all_instances:\n if node.instance_type in [\n InstanceType.RESILIENT_SINGLE,\n InstanceType.SINGLE,\n InstanceType.DBSERVER,\n ]:\n node.check_version_request(timeout)", "def step_impl(context, instance_number):\n num_try = 60\n interval = 10\n success = False\n\n for i in range(num_try):\n time.sleep(interval)\n context.service_instances = context.service.status()['replicaStatus']\n if len(context.service_instances) == int(instance_number):\n success = True\n break\n context.dl.logger.debug(\"Step is running for {:.2f}[s] and now Going to sleep {:.2f}[s]\".format((i + 1) * interval,\n 
interval))\n\n assert success, \"TEST FAILED: Expected {}, Got {}\".format(instance_number, len(context.service_instances))", "def check_vms(st):\n\n logging.info(\"Checking batch system's VMs...\")\n check_time = time.time()\n\n # Retrieve *all* running instances (also the non-owned ones) and filter out\n # statuses of workers which are not valid VMs: we are not interested in them\n rvms = running_instances()\n rvms2 = []\n\n rips = []\n if rvms is not None:\n for inst in rvms:\n ipv4 = inst.network_ip(network_name=cf[\"api\"][\"network_name\"])\n if ipv4 is not None:\n rips.append(ipv4)\n rvms2.append(inst)\n if len(rips) == 0:\n rips = None\n new_workers_status = BatchPlugin.poll_status( st['workers_status'], rips )\n\n rvms=rvms2\n\n if new_workers_status is not None:\n #logging.debug(new_workers_status)\n st['workers_status'] = new_workers_status\n new_workers_status = None\n\n hosts_shutdown = []\n for host,info in st['workers_status'].iteritems():\n if info['jobs'] != 0: continue\n if (check_time-info['unchangedsince']) > cf['elastiq']['idle_for_time_s']:\n logging.info(\"Host %s is idle for more than %ds: requesting shutdown\" % \\\n (host,cf['elastiq']['idle_for_time_s']))\n st['workers_status'][host]['unchangedsince'] = check_time # reset timer\n hosts_shutdown.append(host)\n\n if len(hosts_shutdown) > 0:\n inst_ok = scale_down(hosts_shutdown, valid_hostnames=st['workers_status'].keys())\n change_vms_allegedly_running(st, -len(inst_ok))\n\n # Scale up to reach the minimum quota, if any\n min_vms = cf['quota']['min_vms']\n if min_vms >= 1:\n rvms = running_instances(st['workers_status'].keys())\n if rvms is None:\n logging.warning(\"Cannot get list of running instances for honoring min quota of %d\" % min_vms)\n else:\n n_run = len(rvms)\n n_consider_run = n_run + st['vms_allegedly_running']\n logging.info(\"VMs: running=%d | allegedly running=%d | considering=%d\" % \\\n (n_run, st['vms_allegedly_running'], n_consider_run))\n n_vms = min_vms-n_consider_run\n if n_vms > 0:\n logging.info(\"Below minimum quota (%d VMs): requesting %d more VMs\" % \\\n (min_vms,n_vms))\n inst_ok = scale_up(n_vms, valid_hostnames=st['workers_status'].keys(), vms_allegedly_running=st['vms_allegedly_running'] )\n for inst in inst_ok:\n change_vms_allegedly_running(st, 1, inst)\n st['event_queue'].append({\n 'action': 'check_owned_instance',\n 'when': time.time() + cf['elastiq']['estimated_vm_deploy_time_s'],\n 'params': [ inst ]\n })\n\n # OK: schedule when configured\n sched_when = time.time() + cf['elastiq']['check_vms_every_s']\n\n else:\n # Not OK: reschedule ASAP\n sched_when = 0\n\n return {\n 'action': 'check_vms',\n 'when': sched_when\n }", "def check_yarn_service(master, ec2_opts, num_nodes):\n output = spark_ec2.ssh_read(master, ec2_opts, \"/root/ephemeral-hdfs/bin/yarn node -list -all |grep RUNNING |wc -l\")\n # Ok if one slave is down\n return int(output) >= int(num_nodes) - 1", "def create_instance(name, config, region, secrets, key_name, instance_data,\n deploypass):\n conn = connect_to_region(\n region,\n aws_access_key_id=secrets['aws_access_key_id'],\n aws_secret_access_key=secrets['aws_secret_access_key']\n )\n vpc = VPCConnection(\n region=conn.region,\n aws_access_key_id=secrets['aws_access_key_id'],\n aws_secret_access_key=secrets['aws_secret_access_key'])\n\n # Make sure we don't request the same things twice\n token = str(uuid.uuid4())[:16]\n\n instance_data = instance_data.copy()\n instance_data['name'] = name\n instance_data['hostname'] = '{name}.{domain}'.format(\n name=name, 
domain=config['domain'])\n\n bdm = None\n if 'device_map' in config:\n bdm = BlockDeviceMapping()\n for device, device_info in config['device_map'].items():\n bdm[device] = BlockDeviceType(size=device_info['size'],\n delete_on_termination=True)\n\n ip_address = get_ip(instance_data['hostname'])\n subnet_id = None\n\n if ip_address:\n s_id = get_subnet_id(vpc, ip_address)\n if s_id in config['subnet_ids']:\n if ip_available(conn, ip_address):\n subnet_id = s_id\n else:\n log.warning(\"%s already assigned\" % ip_address)\n\n # TODO: fail if no IP assigned\n if not ip_address or not subnet_id:\n ip_address = None\n subnet_id = choice(config.get('subnet_ids'))\n\n while True:\n try:\n reservation = conn.run_instances(\n image_id=config['ami'],\n key_name=key_name,\n instance_type=config['instance_type'],\n block_device_map=bdm,\n client_token=token,\n subnet_id=subnet_id,\n private_ip_address=ip_address,\n disable_api_termination=bool(config.get('disable_api_termination')),\n security_group_ids=config.get('security_group_ids', []),\n )\n break\n except boto.exception.BotoServerError:\n log.exception(\"Cannot start an instance\")\n time.sleep(10)\n\n instance = reservation.instances[0]\n log.info(\"instance %s created, waiting to come up\", instance)\n # Wait for the instance to come up\n while True:\n try:\n instance.update()\n if instance.state == 'running':\n break\n except Exception:\n log.exception(\"hit error waiting for instance to come up\")\n time.sleep(10)\n\n instance.add_tag('Name', name)\n instance.add_tag('FQDN', instance_data['hostname'])\n instance.add_tag('created', time.strftime(\"%Y-%m-%d %H:%M:%S %Z\",\n time.gmtime()))\n instance.add_tag('moz-type', config['type'])\n\n log.info(\"assimilating %s\", instance)\n instance.add_tag('moz-state', 'pending')\n while True:\n try:\n assimilate(instance.private_ip_address, config, instance_data, deploypass)\n break\n except:\n log.exception(\"problem assimilating %s\", instance)\n time.sleep(10)\n instance.add_tag('moz-state', 'ready')", "def test_update_hyperflex_cluster(self):\n pass", "def update_pvserver_instances(instances):\n status = {True: \"available\", False: \"in-use\"}\n for k, v in instances.items():\n is_available = is_pvserver_available(v[\"name\"], v[\"port\"])\n v[\"status\"] = status[is_available]", "def update_available_resource(self, context):\n # ask hypervisor for its view of resource availability &\n # usage:\n resources = self.driver.get_available_resource()\n if not resources:\n # The virt driver does not support this function\n LOG.warn(_(\"Virt driver does not support \"\n \"'get_available_resource' Compute tracking is disabled.\"))\n self.compute_node = None\n self.claims = {}\n return\n\n # Confirm resources dictionary contains expected keys:\n self._verify_resources(resources)\n\n resources['free_ram_mb'] = resources['memory_mb'] - \\\n resources['memory_mb_used']\n resources['free_disk_gb'] = resources['local_gb'] - \\\n resources['local_gb_used']\n\n LOG.audit(_(\"free_ram_mb: %s\") % resources['free_ram_mb'])\n LOG.audit(_(\"free_disk_gb: %s\") % resources['free_disk_gb'])\n # Apply resource claims representing in-progress operations to\n # 'resources'. 
This may over-estimate the amount of resources in use,\n # at least until the next time 'update_available_resource' runs.\n self._apply_claims(resources)\n\n # also generate all load stats:\n values = self._create_load_stats(context)\n resources.update(values)\n\n if not self.compute_node:\n # we need a copy of the ComputeNode record:\n service = self._get_service(context)\n if not service:\n # no service record, disable resource\n return\n\n compute_node_ref = service['compute_node']\n if compute_node_ref:\n self.compute_node = compute_node_ref[0]\n\n if not self.compute_node:\n # Need to create the ComputeNode record:\n resources['service_id'] = service['id']\n self.compute_node = self._create(context, resources)\n LOG.info(_('Compute_service record created for %s ') % self.host)\n\n else:\n # just update the record:\n self.compute_node = self._update(context, resources,\n prune_stats=True)\n LOG.info(_('Compute_service record updated for %s ') % self.host)", "def instance_outdated_age(instance_id, days_fresh):\n\n response = ec2_client.describe_instances(\n InstanceIds=[\n instance_id,\n ]\n )\n\n instance_launch_time = response['Reservations'][0]['Instances'][0]['LaunchTime']\n\n # gets the age of a node by days only:\n instance_age = ((datetime.datetime.now(instance_launch_time.tzinfo) - instance_launch_time).days)\n\n # gets the remaining age of a node in seconds (e.g. if node is y days and x seconds old this will only retrieve the x seconds):\n instance_age_remainder = ((datetime.datetime.now(instance_launch_time.tzinfo) - instance_launch_time).seconds)\n\n if instance_age > days_fresh:\n logger.info(\"Instance id {} launch age of '{}' day(s) is older than expected '{}' day(s)\".format(instance_id, instance_age, days_fresh))\n return True\n elif (instance_age == days_fresh) and (instance_age_remainder > 0):\n logger.info(\"Instance id {} is older than expected '{}' day(s) by {} seconds.\".format(instance_id, days_fresh, instance_age_remainder))\n return True\n else:\n logger.info(\"Instance id {} : OK \".format(instance_id))\n return False", "def check_exactly_one_current_version(self):\n expected_state = \"CURRENT\"\n\n query = \"SELECT COUNT(*) FROM cluster_version;\"\n self.cursor.execute(query)\n result = self.cursor.fetchone()\n if result is None or len(result) != 1:\n Logger.error(\"Unable to run query: {0}\".format(query))\n return\n\n count = result[0]\n if count == 0:\n msg = \"There are no cluster_versions. 
Start ambari-server, and then perform a Restart on one of the services.\\n\" + \\\n \"Then navigate to the \\\"Stacks and Versions > Versions\\\" page and ensure you can see the stack version.\\n\" + \\\n \"Next, restart all services, one-by-one, so that Ambari knows what version each component is running.\"\n Logger.warning(msg)\n elif count == 1:\n query = \"SELECT rv.repo_version_id, rv.version, cv.state FROM cluster_version cv JOIN repo_version rv ON cv.repo_version_id = rv.repo_version_id;\"\n self.cursor.execute(query)\n result = self.cursor.fetchone()\n\n repo_version_id = None\n repo_version = None\n cluster_version_state = None\n\n if result and len(result) == 3:\n repo_version_id = result[0]\n repo_version = result[1]\n cluster_version_state = result[2]\n\n if repo_version_id and repo_version and cluster_version_state:\n if cluster_version_state.upper() == expected_state:\n self.check_all_hosts(repo_version_id, repo_version)\n Logger.info(\"******************************************************************************************************************************************************\")\n Logger.info(\"\\t\\t\\t\\t\\t\\t\\tHDP STACK OVERVIEW\")\n\t Logger.info(\"******************************************************************************************************************************************************\")\n print (\"\\n\")\n Logger.info(\"Cluster HDP Version\\t{0}\".format(repo_version))\n Logger.info(\"Cluster State\\t{0}\".format(cluster_version_state))\n Logger.info(\"Ambari version\\t:{0}\".format(self.ambari_version))\n\n if self.ambari_server_user != \"root\" :\n Logger.info(\"Ambari Server as non-root?\\tYes\")\n else :\n Logger.info(\"Ambari Server as non-root?\\tNo\")\n\n # Read ambari-agent.ini file\n if os.path.exists(AMBARI_AGENT_INI):\n self.ambari_agent_props = self.read_conf_file(AMBARI_AGENT_INI)\n Logger.debug(\"Reading file {0}.\".format(self.ambari_agent_props))\n if \"run_as_user\" in self.ambari_agent_props:\n self.run_as_user = self.ambari_agent_props[\"run_as_user\"]\n if self.run_as_user != \"root\":\n Logger.info(\"Ambari Agent as non-root?\\tYes\")\n else:\n Logger.info(\"Ambari Agent as non-root?\\tNo\")\n else:\n Logger.error(\"Unable to read ambari-agent.ini file\")\n\n else:\n Logger.error(\"Cluster Version {0} should have a state of {1} but is {2}. Make sure to restart all of the Services.\".format(repo_version, expected_state, cluster_version_state))\n else:\n Logger.error(\"Unable to run query: {0}\".format(query))\n elif count > 1:\n # Ensure at least one Cluster Version is CURRENT\n Logger.info(\"Found multiple Cluster versions, checking that exactly one is {0}.\".format(expected_state))\n query = \"SELECT rv.repo_version_id, rv.version, cv.state FROM cluster_version cv JOIN repo_version rv ON cv.repo_version_id = rv.repo_version_id WHERE cv.state = '{0}';\".format(expected_state)\n self.cursor.execute(query)\n rows = self.cursor.fetchall()\n if rows:\n if len(rows) == 1:\n Logger.info(\"Good news; Cluster Version {0} has a state of {1}.\".format(rows[0][1], expected_state))\n self.check_all_hosts_current(rows[0][0], rows[0][1])\n elif len(rows) > 1:\n # Take the repo_version's version column\n repo_versions = [row[1] for row in rows if len(row) == 3]\n Logger.error(\"Found multiple cluster versions with a state of {0}, but only one should be {0}.\\n\" \\\n \"Will need to fix this manually, please contact Support. 
Cluster Versions found: {1}\".format(expected_state, \", \".join(repo_versions)))\n else:\n Logger.error(\"Unable to run query: {0}\\n\".format(query))\n pass", "def provision(args):\n cfg_file = os.path.join(xbow.XBOW_CONFIGDIR, \"settings.yml\")\n\n with open(cfg_file, 'r') as ymlfile:\n cfg = yaml.safe_load(ymlfile)\n\n scheduler = get_by_name(cfg['scheduler_name'])\n if len(scheduler) == 0:\n raise ValueError('Error - cannot find the scheduler')\n elif len(scheduler) > 1:\n raise ValueError('Error - more than one scheduler found')\n workers = get_by_name(cfg['worker_pool_name'])\n if len(workers) == 0:\n print('Warning: no workers found')\n all_nodes = scheduler + workers\n all_cis = [ConnectedInstance(i) for i in all_nodes]\n with open(args.script, 'r') as f:\n for line in f:\n if len(line) > 0 and line[0] == '#':\n print(line[:-1])\n elif len(line) > 0 :\n command = line[:-1]\n if command.split()[0] != 'sudo':\n command = 'sudo ' + command\n print(command + ' : ', end='', flush=True)\n result = exec_all(all_cis, command)\n status = np.all(np.array(result) == 0)\n if status:\n print('OK')\n else:\n print('FAILED')\n for i in range(len(result)):\n if result[i] != 0:\n if i == 0:\n print('Error on scheduler:')\n else:\n print('Error on worker {}'.format(i-1))\n print(all_cis[i].output)\n break\n else:\n status = False\n print(line[:-1], ' : ERROR')\n break\n\n return status", "def test_rebuilt_server_vcpus(self):\n\n remote_client = self.server_behaviors.get_remote_instance_client(\n self.server, self.servers_config)\n server_actual_vcpus = remote_client.get_number_of_cpus()\n self.assertEqual(server_actual_vcpus, self.expected_vcpus)", "def check_fixed(self: AutoScaler) -> AutoScalerState:\n launched_size = len(self.clients)\n registered_size = Client.count_connected()\n task_count = Task.count_remaining()\n log.debug(f'Autoscale check (clients: {registered_size}/{launched_size}, tasks: {task_count})')\n if launched_size < self.min_size:\n log.debug(f'Autoscale min-size reached ({launched_size} < {self.min_size})')\n return AutoScalerState.SCALE\n if launched_size == 0 and task_count == 0:\n return AutoScalerState.WAIT\n if launched_size == 0 and task_count > 0:\n log.debug(f'Autoscale adding client ({task_count} tasks remaining)')\n return AutoScalerState.SCALE\n else:\n return AutoScalerState.WAIT", "def checkDBImportInstance(self, instance):\n\n\t\tsession = self.configDBSession()\n\t\tdbimportInstances = aliased(configSchema.dbimportInstances)\n\n\t\tresult = (session.query(\n\t\t\t\tdbimportInstances.name\n\t\t\t)\n\t\t\t.select_from(dbimportInstances)\n\t\t\t.filter(dbimportInstances.name == instance)\n\t\t\t.count())\n\n\t\tif result == 0:\n\t\t\tlogging.error(\"No DBImport Instance with that name can be found in table 'dbimport_instances'\")\n\t\t\tself.remove_temporary_files()\n\t\t\tsys.exit(1)", "def run_instances(self, image_id, min_count=1, max_count=1,\r\n key_name=None, security_groups=None,\r\n user_data=None, addressing_type=None,\r\n instance_type='m1.small', placement=None,\r\n kernel_id=None, ramdisk_id=None,\r\n monitoring_enabled=False, subnet_id=None,\r\n block_device_map=None,\r\n disable_api_termination=False,\r\n instance_initiated_shutdown_behavior=None,\r\n private_ip_address=None,\r\n placement_group=None, client_token=None,\r\n security_group_ids=None):\r\n params = {'ImageId':image_id,\r\n 'MinCount':min_count,\r\n 'MaxCount': max_count}\r\n if key_name:\r\n params['KeyName'] = key_name\r\n if security_group_ids:\r\n l = []\r\n for group in 
security_group_ids:\r\n if isinstance(group, SecurityGroup):\r\n l.append(group.name)\r\n else:\r\n l.append(group)\r\n self.build_list_params(params, l, 'SecurityGroupId')\r\n if security_groups:\r\n l = []\r\n for group in security_groups:\r\n if isinstance(group, SecurityGroup):\r\n l.append(group.name)\r\n else:\r\n l.append(group)\r\n self.build_list_params(params, l, 'SecurityGroup')\r\n if user_data:\r\n params['UserData'] = base64.b64encode(user_data)\r\n if addressing_type:\r\n params['AddressingType'] = addressing_type\r\n if instance_type:\r\n params['InstanceType'] = instance_type\r\n if placement:\r\n params['Placement.AvailabilityZone'] = placement\r\n if placement_group:\r\n params['Placement.GroupName'] = placement_group\r\n if kernel_id:\r\n params['KernelId'] = kernel_id\r\n if ramdisk_id:\r\n params['RamdiskId'] = ramdisk_id\r\n if monitoring_enabled:\r\n params['Monitoring.Enabled'] = 'true'\r\n if subnet_id:\r\n params['SubnetId'] = subnet_id\r\n if private_ip_address:\r\n params['PrivateIpAddress'] = private_ip_address\r\n if block_device_map:\r\n block_device_map.build_list_params(params)\r\n if disable_api_termination:\r\n params['DisableApiTermination'] = 'true'\r\n if instance_initiated_shutdown_behavior:\r\n val = instance_initiated_shutdown_behavior\r\n params['InstanceInitiatedShutdownBehavior'] = val\r\n if client_token:\r\n params['ClientToken'] = client_token\r\n return self.get_object('RunInstances', params, Reservation, verb='POST')", "def __init__(__self__, *,\n backend_type: Optional[pulumi.Input['InstanceBackendType']] = None,\n connection_name: Optional[pulumi.Input[str]] = None,\n current_disk_size: Optional[pulumi.Input[str]] = None,\n database_version: Optional[pulumi.Input['InstanceDatabaseVersion']] = None,\n disk_encryption_configuration: Optional[pulumi.Input['DiskEncryptionConfigurationArgs']] = None,\n disk_encryption_status: Optional[pulumi.Input['DiskEncryptionStatusArgs']] = None,\n etag: Optional[pulumi.Input[str]] = None,\n failover_replica: Optional[pulumi.Input['InstanceFailoverReplicaArgs']] = None,\n gce_zone: Optional[pulumi.Input[str]] = None,\n instance_type: Optional[pulumi.Input['InstanceInstanceType']] = None,\n ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input['IpMappingArgs']]]] = None,\n ipv6_address: Optional[pulumi.Input[str]] = None,\n kind: Optional[pulumi.Input[str]] = None,\n maintenance_version: Optional[pulumi.Input[str]] = None,\n master_instance_name: Optional[pulumi.Input[str]] = None,\n max_disk_size: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n on_premises_configuration: Optional[pulumi.Input['OnPremisesConfigurationArgs']] = None,\n out_of_disk_report: Optional[pulumi.Input['SqlOutOfDiskReportArgs']] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n replica_configuration: Optional[pulumi.Input['ReplicaConfigurationArgs']] = None,\n replica_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n root_password: Optional[pulumi.Input[str]] = None,\n satisfies_pzs: Optional[pulumi.Input[bool]] = None,\n scheduled_maintenance: Optional[pulumi.Input['SqlScheduledMaintenanceArgs']] = None,\n secondary_gce_zone: Optional[pulumi.Input[str]] = None,\n self_link: Optional[pulumi.Input[str]] = None,\n server_ca_cert: Optional[pulumi.Input['SslCertArgs']] = None,\n service_account_email_address: Optional[pulumi.Input[str]] = None,\n settings: Optional[pulumi.Input['SettingsArgs']] = None,\n state: 
Optional[pulumi.Input['InstanceState']] = None,\n suspension_reason: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceSuspensionReasonItem']]]] = None):\n if backend_type is not None:\n pulumi.set(__self__, \"backend_type\", backend_type)\n if connection_name is not None:\n pulumi.set(__self__, \"connection_name\", connection_name)\n if current_disk_size is not None:\n pulumi.set(__self__, \"current_disk_size\", current_disk_size)\n if database_version is not None:\n pulumi.set(__self__, \"database_version\", database_version)\n if disk_encryption_configuration is not None:\n pulumi.set(__self__, \"disk_encryption_configuration\", disk_encryption_configuration)\n if disk_encryption_status is not None:\n pulumi.set(__self__, \"disk_encryption_status\", disk_encryption_status)\n if etag is not None:\n warnings.warn(\"\"\"This field is deprecated and will be removed from a future version of the API. Use the `settings.settingsVersion` field instead.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"etag is deprecated: This field is deprecated and will be removed from a future version of the API. Use the `settings.settingsVersion` field instead.\"\"\")\n if etag is not None:\n pulumi.set(__self__, \"etag\", etag)\n if failover_replica is not None:\n pulumi.set(__self__, \"failover_replica\", failover_replica)\n if gce_zone is not None:\n pulumi.set(__self__, \"gce_zone\", gce_zone)\n if instance_type is not None:\n pulumi.set(__self__, \"instance_type\", instance_type)\n if ip_addresses is not None:\n pulumi.set(__self__, \"ip_addresses\", ip_addresses)\n if ipv6_address is not None:\n warnings.warn(\"\"\"The IPv6 address assigned to the instance. (Deprecated) This property was applicable only to First Generation instances.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"ipv6_address is deprecated: The IPv6 address assigned to the instance. 
(Deprecated) This property was applicable only to First Generation instances.\"\"\")\n if ipv6_address is not None:\n pulumi.set(__self__, \"ipv6_address\", ipv6_address)\n if kind is not None:\n pulumi.set(__self__, \"kind\", kind)\n if maintenance_version is not None:\n pulumi.set(__self__, \"maintenance_version\", maintenance_version)\n if master_instance_name is not None:\n pulumi.set(__self__, \"master_instance_name\", master_instance_name)\n if max_disk_size is not None:\n pulumi.set(__self__, \"max_disk_size\", max_disk_size)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if on_premises_configuration is not None:\n pulumi.set(__self__, \"on_premises_configuration\", on_premises_configuration)\n if out_of_disk_report is not None:\n pulumi.set(__self__, \"out_of_disk_report\", out_of_disk_report)\n if project is not None:\n pulumi.set(__self__, \"project\", project)\n if region is not None:\n pulumi.set(__self__, \"region\", region)\n if replica_configuration is not None:\n pulumi.set(__self__, \"replica_configuration\", replica_configuration)\n if replica_names is not None:\n pulumi.set(__self__, \"replica_names\", replica_names)\n if root_password is not None:\n pulumi.set(__self__, \"root_password\", root_password)\n if satisfies_pzs is not None:\n pulumi.set(__self__, \"satisfies_pzs\", satisfies_pzs)\n if scheduled_maintenance is not None:\n pulumi.set(__self__, \"scheduled_maintenance\", scheduled_maintenance)\n if secondary_gce_zone is not None:\n pulumi.set(__self__, \"secondary_gce_zone\", secondary_gce_zone)\n if self_link is not None:\n pulumi.set(__self__, \"self_link\", self_link)\n if server_ca_cert is not None:\n pulumi.set(__self__, \"server_ca_cert\", server_ca_cert)\n if service_account_email_address is not None:\n pulumi.set(__self__, \"service_account_email_address\", service_account_email_address)\n if settings is not None:\n pulumi.set(__self__, \"settings\", settings)\n if state is not None:\n pulumi.set(__self__, \"state\", state)\n if suspension_reason is not None:\n pulumi.set(__self__, \"suspension_reason\", suspension_reason)", "def do_create(self):\n cluster_id = self.entity.cluster_id\n if cluster_id and self.cause == consts.CAUSE_RPC:\n # Check cluster size constraint if target cluster is specified\n cluster = cm.Cluster.load(self.context, cluster_id)\n desired = no.Node.count_by_cluster(self.context, cluster_id)\n result = su.check_size_params(cluster, desired, None, None, True)\n if result:\n # cannot place node into the cluster\n no.Node.update(self.context, self.entity.id,\n {'cluster_id': '', 'status': consts.NS_ERROR})\n return self.RES_ERROR, result\n\n res, reason = self.entity.do_create(self.context)\n\n if cluster_id and self.cause == consts.CAUSE_RPC:\n # Update cluster's desired_capacity and re-evaluate its status no\n # matter the creation is a success or not because the node object\n # is already treated as member of the cluster and the node\n # creation may have changed the cluster's status\n cluster.eval_status(self.context, consts.NODE_CREATE,\n desired_capacity=desired)\n if res:\n return self.RES_OK, 'Node created successfully.'\n else:\n return self.RES_ERROR, reason", "def upgrade():\n op.add_column(\n 'share_instances',\n Column('access_rules_status', String(length=255))\n )\n\n connection = op.get_bind()\n share_instances_table = utils.load_table('share_instances', connection)\n instance_access_table = utils.load_table('share_instance_access_map',\n connection)\n\n # NOTE(u_glide): Data migrations shouldn't be 
performed on live clouds\n # because it will lead to unpredictable behaviour of running operations\n # like migration.\n instances_query = (\n share_instances_table.select()\n .where(share_instances_table.c.status == constants.STATUS_AVAILABLE)\n .where(share_instances_table.c.deleted == 'False')\n )\n\n for instance in connection.execute(instances_query):\n\n access_mappings_query = instance_access_table.select().where(\n instance_access_table.c.share_instance_id == instance['id']\n ).where(instance_access_table.c.deleted == 'False')\n\n status = constants.STATUS_ACTIVE\n\n for access_rule in connection.execute(access_mappings_query):\n\n if (access_rule['state'] == constants.STATUS_DELETING or\n access_rule['state'] not in priorities):\n continue\n\n if priorities[access_rule['state']] > priorities[status]:\n status = access_rule['state']\n\n # pylint: disable=no-value-for-parameter\n op.execute(\n share_instances_table.update().where(\n share_instances_table.c.id == instance['id']\n ).values({'access_rules_status': upgrade_data_mapping[status]})\n )\n\n op.drop_column('share_instance_access_map', 'state')", "def test_live_migration_src_check_volume_node_not_alive(self):\n\n instance_id = self._create_instance()\n i_ref = db.instance_get(self.context, instance_id)\n dic = {'instance_id': instance_id, 'size': 1}\n v_ref = db.volume_create(self.context, {'instance_id': instance_id,\n 'size': 1})\n t1 = utils.utcnow() - datetime.timedelta(1)\n dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume',\n 'topic': 'volume', 'report_count': 0}\n s_ref = db.service_create(self.context, dic)\n\n self.assertRaises(exception.VolumeServiceUnavailable,\n self.scheduler.driver.schedule_live_migration,\n self.context, instance_id, i_ref['host'])\n\n db.instance_destroy(self.context, instance_id)\n db.service_destroy(self.context, s_ref['id'])\n db.volume_destroy(self.context, v_ref['id'])", "def check_runtime_vm(**kwargs):\n\n ti: TaskInstance = kwargs[\"ti\"]\n last_warning_time = ti.xcom_pull(\n key=TerraformTasks.XCOM_WARNING_TIME,\n task_ids=TerraformTasks.TASK_ID_VM_RUNTIME,\n dag_id=TerraformTasks.DAG_ID_DESTROY_VM,\n include_prior_dates=True,\n )\n start_time_vm = ti.xcom_pull(\n key=TerraformTasks.XCOM_START_TIME_VM,\n task_ids=TerraformTasks.TASK_ID_RUN,\n dag_id=TerraformTasks.DAG_ID_CREATE_VM,\n include_prior_dates=True,\n )\n\n if start_time_vm:\n # calculate number of hours passed since start time vm and now\n hours_on = (ti.start_date - start_time_vm).total_seconds() / 3600\n logging.info(\n f\"Start time VM: {start_time_vm}, hours passed since start time: {hours_on}, warning limit: \"\n f\"{TerraformTasks.VM_RUNTIME_H_WARNING}\"\n )\n\n # check if a warning has been sent previously and if so, how many hours ago\n if last_warning_time:\n hours_since_warning = (ti.start_date - last_warning_time).total_seconds() / 3600\n else:\n hours_since_warning = None\n\n # check if the VM has been on longer than the limit\n if hours_on > TerraformTasks.VM_RUNTIME_H_WARNING:\n # check if no warning was sent before or last time was longer ago than warning frequency\n if not hours_since_warning or hours_since_warning > TerraformTasks.WARNING_FREQUENCY_H:\n comments = (\n f\"Worker VM has been on since {start_time_vm}. No. 
hours passed since then: \"\n f\"{hours_on}.\"\n f\" Warning limit: {TerraformTasks.VM_RUNTIME_H_WARNING}H\"\n )\n project_id = Variable.get(AirflowVars.PROJECT_ID)\n slack_hook = create_slack_webhook(comments, project_id, **kwargs)\n\n # http_hook outputs the secret token, suppressing logging 'info' by setting level to 'warning'\n old_levels = change_task_log_level(logging.WARNING)\n slack_hook.execute()\n # change back to previous levels\n change_task_log_level(old_levels)\n\n ti.xcom_push(TerraformTasks.XCOM_WARNING_TIME, ti.start_date)\n else:\n logging.info(f\"Start time VM unknown.\")", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n auto_healing_policies: Optional[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerAutoHealingPoliciesArgs']]] = None,\n base_instance_name: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n distribution_policy_zones: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n named_ports: Optional[pulumi.Input[List[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerNamedPortArgs']]]]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n stateful_disks: Optional[pulumi.Input[List[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerStatefulDiskArgs']]]]] = None,\n target_pools: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,\n target_size: Optional[pulumi.Input[float]] = None,\n update_policy: Optional[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerUpdatePolicyArgs']]] = None,\n versions: Optional[pulumi.Input[List[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerVersionArgs']]]]] = None,\n wait_for_instances: Optional[pulumi.Input[bool]] = None,\n __props__=None,\n __name__=None,\n __opts__=None):\n if __name__ is not None:\n warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning)\n resource_name = __name__\n if __opts__ is not None:\n warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning)\n opts = __opts__\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n if opts.version is None:\n opts.version = _utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = dict()\n\n __props__['auto_healing_policies'] = auto_healing_policies\n if base_instance_name is None:\n raise TypeError(\"Missing required property 'base_instance_name'\")\n __props__['base_instance_name'] = base_instance_name\n __props__['description'] = description\n __props__['distribution_policy_zones'] = distribution_policy_zones\n __props__['name'] = name\n __props__['named_ports'] = named_ports\n __props__['project'] = project\n if region is None:\n raise TypeError(\"Missing required property 'region'\")\n __props__['region'] = region\n __props__['stateful_disks'] = stateful_disks\n __props__['target_pools'] = target_pools\n __props__['target_size'] = target_size\n __props__['update_policy'] = update_policy\n if versions is None:\n raise TypeError(\"Missing required property 'versions'\")\n __props__['versions'] = versions\n __props__['wait_for_instances'] = wait_for_instances\n __props__['fingerprint'] = None\n 
__props__['instance_group'] = None\n __props__['self_link'] = None\n super(RegionInstanceGroupManager, __self__).__init__(\n 'gcp:compute/regionInstanceGroupManager:RegionInstanceGroupManager',\n resource_name,\n __props__,\n opts)", "def get_running_instance_count(body):\n\tbody = dict(body)\n\tconn = autoscale.connect_to_region(region_name='us-west-2',\n\t\t\t\t\t\t\t\taws_access_key_id=body[\"aws_access_key_id\"],\n\t\t\t\t\t\t\t\taws_secret_access_key=body[\"aws_secret_access_key\"])\n\tinstance_status = conn.get_all_autoscaling_instances()\n\trunning_instances = 0\n\tfor item in instance_status:\n\t\tif (item.health_status == 'HEALTHY'):\n\t\t\trunning_instances += 1\n\treturn {\"data\":running_instances}", "def check_stability(self):", "def test_too_many_cores(self):\n compute1 = self.start_service('compute', host='host1')\n compute2 = self.start_service('compute', host='host2')\n instance_ids1 = []\n instance_ids2 = []\n for index in xrange(FLAGS.max_cores):\n instance_id = self._create_instance()\n compute1.run_instance(self.context, instance_id)\n instance_ids1.append(instance_id)\n instance_id = self._create_instance()\n compute2.run_instance(self.context, instance_id)\n instance_ids2.append(instance_id)\n instance_id = self._create_instance()\n self.assertRaises(driver.NoValidHost,\n self.scheduler.driver.schedule_run_instance,\n self.context,\n instance_id)\n db.instance_destroy(self.context, instance_id)\n for instance_id in instance_ids1:\n compute1.terminate_instance(self.context, instance_id)\n for instance_id in instance_ids2:\n compute2.terminate_instance(self.context, instance_id)\n compute1.kill()\n compute2.kill()", "def scale_up_application(asg_name):\n if_verbose(\"Scaling up %s in steps of %d\" % (asg_name, args.instance_count_step))\n current_capacity_count = args.instance_count_step\n while(True):\n check_error(scale_up_autoscaling_group(asg_name, current_capacity_count))\n check_error(check_autoscaling_group_health(asg_name, current_capacity_count))\n\n if args.elb_name:\n asg_instances = [{\"InstanceId\": a[\"InstanceId\"]} for a in asg.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name], MaxRecords=1)[\"AutoScalingGroups\"][0][\"Instances\"]]\n check_error(check_elb_instance_health(args.elb_name, asg_instances))\n\n if args.instance_count == current_capacity_count:\n break\n else:\n current_capacity_count += args.instance_count_step\n else:\n break\n\n if_verbose(\"Scaling up %s successful\" % asg_name)", "def test_delete_cluster_resource_quota(self):\n pass", "def test_assign_configuration_to_valid_instance(self):\n print(\"instance_info.id: %s\" % instance_info.id)\n print(\"configuration_info: %s\" % configuration_info)\n print(\"configuration_info.id: %s\" % configuration_info.id)\n config_id = configuration_info.id\n instance_info.dbaas.instances.modify(instance_info.id,\n configuration=config_id)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)", "def main():\n rs = redshift(config_file=CONFIG_FILENAME)\n \n # check if cluster already available\n try:\n clust_avail = check_available(rs)\n except rs_client.exceptions.ClusterNotFoundFault:\n clust_avail = False\n\n # if cluster not available, create it\n if not clust_avail:\n create_cluster(rs) \n \n print(f'Cluster is available. 
Cluster information: \\n{rs.get_cluster_info()}')", "def run(self):\n client = self._get_client()\n metadata = self._metadata\n\n if str(metadata.get('cluster')) == str(self._cluster_id):\n if str(self._uuid) == str(self._node_id):\n #hypervisor_hostname = client.servers.find(\n # id=self._uuid).to_dict()['OS-EXT-SRV-ATTR:hypervisor_hostname']\n hypervisor_hostname = client.servers.find(\n id=self._uuid).to_dict()['OS-EXT-SRV-ATTR:host']\n data = {\n 'migrate': True,\n 'uuid': self._uuid,\n 'hypervisor_hostname': hypervisor_hostname,\n 'flavor_id': self._flavor\n }\n return Result(data)\n\n data = {\n 'migrate': False,\n 'uuid': self._uuid,\n 'hypervisor_hostname': '',\n 'flavor_id':self._flavor\n }\n return Result(data)", "def launch_instance_vpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n subnet_id,\n security_group_id,\n machine_type = 'm3.medium',\n user_data = None,\n wait_for_running = True,\n public_ip = False,\n static_ip_address = None,\n monitor_params = None ) :\n interfaces = None\n subnet = None\n security_group_ids = None\n \n if static_ip_address is None:\n spec = boto.ec2.networkinterface.NetworkInterfaceSpecification( subnet_id = subnet_id,\n groups = [ security_group_id ],\n associate_public_ip_address = public_ip )\n interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection( spec )\n else:\n subnet = subnet_id\n security_group_ids = [security_group_id]\n\n instance_r = ec2_conn.run_instances( image_id = ami.id,\n key_name = keypair,\n instance_type = machine_type,\n monitoring_enabled = True,\n network_interfaces = interfaces,\n subnet_id = subnet, \n user_data = user_data,\n security_group_ids = security_group_ids,\n private_ip_address = static_ip_address )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n \n print \"Waiting for instance to be ready\"\n \n if wait_for_running :\n running = wait_on_object_state( instance, 'running', max_wait = 600, failure_state = 'terminated' )\n if not running :\n print \"Deployment instance still not up after long period of time! Exiting...\"\n sys.exit( 3 )\n\n if monitor_params :\n print \"Adding monitoring to the instance.\"\n\n return instance", "def update(self, validate=False):\r\n rs = self.connection.get_all_dbinstances(self.id)\r\n if len(rs) > 0:\r\n for i in rs:\r\n if i.id == self.id:\r\n self.__dict__.update(i.__dict__)\r\n elif validate:\r\n raise ValueError('%s is not a valid Instance ID' % self.id)\r\n return self.status", "def db_healthcheck() -> bool:\n\n try:\n result = query_db(\"Select 1\")\n app.logfile.info(\"Select 1\")\n return True\n except ConnectionError as err:\n app.logger.error(err)\n return False" ]
[ "0.61977696", "0.59943205", "0.5993166", "0.5947614", "0.59099776", "0.5793798", "0.5754149", "0.57099897", "0.56931806", "0.5669819", "0.5640985", "0.5618391", "0.55936277", "0.5537415", "0.5518622", "0.54929787", "0.5492541", "0.54782796", "0.5477664", "0.54595304", "0.54503506", "0.5448645", "0.5402863", "0.5379081", "0.5374825", "0.5368817", "0.5363126", "0.5359722", "0.53289676", "0.53256845", "0.5297527", "0.52857167", "0.5284727", "0.527614", "0.527284", "0.5258998", "0.52584803", "0.52582496", "0.5245029", "0.5219436", "0.5204467", "0.52035713", "0.52010155", "0.5199581", "0.51962185", "0.5189876", "0.5187886", "0.51823515", "0.51798314", "0.5174562", "0.5171759", "0.516838", "0.5165408", "0.5149708", "0.5148164", "0.5147531", "0.51457584", "0.5131391", "0.51281106", "0.512661", "0.5123801", "0.512119", "0.5103619", "0.5098898", "0.50973976", "0.50894976", "0.5088131", "0.5071653", "0.50690156", "0.50673836", "0.5061204", "0.50485796", "0.5046665", "0.5044445", "0.504355", "0.503482", "0.50327575", "0.50319046", "0.50198346", "0.5018546", "0.5018259", "0.50179106", "0.5016462", "0.5011347", "0.49951693", "0.49837384", "0.49679372", "0.4964339", "0.4963195", "0.49599385", "0.49577937", "0.49474788", "0.49328753", "0.49312326", "0.4926949", "0.4923914", "0.49226472", "0.49210218", "0.49134505", "0.49060914", "0.4904296" ]
0.0
-1
This operation is applicable to replica set instances and sharded cluster instances. You can call this operation to check whether resources are sufficient for creating an instance, upgrading a replica set or sharded cluster instance, or upgrading a single node of a sharded cluster instance. > You can call this operation a maximum of 200 times per minute.
async def evaluate_resource_async(
    self,
    request: dds_20151201_models.EvaluateResourceRequest,
) -> dds_20151201_models.EvaluateResourceResponse:
    runtime = util_models.RuntimeOptions()
    return await self.evaluate_resource_with_options_async(request, runtime)
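For context, a minimal usage sketch of the async wrapper above, assuming the usual Alibaba Cloud Python SDK layout (package alibabacloud_dds20151201, Config from alibabacloud_tea_openapi). The credentials, endpoint, and request fields are placeholders and assumptions, not values taken from this record; verify the field names against dds_20151201_models.EvaluateResourceRequest.

import asyncio

from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_dds20151201.client import Client
from alibabacloud_dds20151201 import models as dds_20151201_models


async def main() -> None:
    # Placeholder credentials, region, and endpoint; replace with your own.
    config = open_api_models.Config(
        access_key_id='<your-access-key-id>',
        access_key_secret='<your-access-key-secret>',
        region_id='cn-hangzhou',
    )
    config.endpoint = 'mongodb.aliyuncs.com'
    client = Client(config)

    # Assumed snake_case field names mirroring the EvaluateResource API
    # (RegionId, Engine, EngineVersion); check the generated model class.
    request = dds_20151201_models.EvaluateResourceRequest(
        region_id='cn-hangzhou',
        engine='MongoDB',
        engine_version='4.4',
    )

    # The response body indicates whether resources are sufficient for the
    # requested instance creation or upgrade.
    response = await client.evaluate_resource_async(request)
    print(response.body)


if __name__ == '__main__':
    asyncio.run(main())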
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_smartstack_replication_for_instance(\n instance_config,\n expected_count,\n smartstack_replication_checker,\n):\n\n crit_threshold = instance_config.get_replication_crit_percentage()\n\n log.info('Checking instance %s in smartstack', instance_config.job_id)\n smartstack_replication_info = \\\n smartstack_replication_checker.get_replication_for_instance(instance_config)\n\n log.debug('Got smartstack replication info for %s: %s' %\n (instance_config.job_id, smartstack_replication_info))\n\n if len(smartstack_replication_info) == 0:\n status = pysensu_yelp.Status.CRITICAL\n output = (\n 'Service %s has no Smartstack replication info. Make sure the discover key in your smartstack.yaml '\n 'is valid!\\n'\n ) % instance_config.job_id\n log.error(output)\n else:\n expected_count_per_location = int(expected_count / len(smartstack_replication_info))\n output = ''\n output_critical = ''\n output_ok = ''\n under_replication_per_location = []\n\n for location, available_backends in sorted(smartstack_replication_info.items()):\n num_available_in_location = available_backends.get(instance_config.job_id, 0)\n under_replicated, ratio = is_under_replicated(\n num_available_in_location, expected_count_per_location, crit_threshold,\n )\n if under_replicated:\n output_critical += '- Service %s has %d out of %d expected instances in %s (CRITICAL: %d%%)\\n' % (\n instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,\n )\n else:\n output_ok += '- Service %s has %d out of %d expected instances in %s (OK: %d%%)\\n' % (\n instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,\n )\n under_replication_per_location.append(under_replicated)\n\n output += output_critical\n if output_critical and output_ok:\n output += '\\n\\n'\n output += 'The following locations are OK:\\n'\n output += output_ok\n\n if any(under_replication_per_location):\n status = pysensu_yelp.Status.CRITICAL\n output += (\n \"\\n\\n\"\n \"What this alert means:\\n\"\n \"\\n\"\n \" This replication alert means that a SmartStack powered loadbalancer (haproxy)\\n\"\n \" doesn't have enough healthy backends. Not having enough healthy backends\\n\"\n \" means that clients of that service will get 503s (http) or connection refused\\n\"\n \" (tcp) when trying to connect to it.\\n\"\n \"\\n\"\n \"Reasons this might be happening:\\n\"\n \"\\n\"\n \" The service may simply not have enough copies or it could simply be\\n\"\n \" unhealthy in that location. There also may not be enough resources\\n\"\n \" in the cluster to support the requested instance count.\\n\"\n \"\\n\"\n \"Things you can do:\\n\"\n \"\\n\"\n \" * You can view the logs for the job with:\\n\"\n \" paasta logs -s %(service)s -i %(instance)s -c %(cluster)s\\n\"\n \"\\n\"\n \" * Fix the cause of the unhealthy service. 
Try running:\\n\"\n \"\\n\"\n \" paasta status -s %(service)s -i %(instance)s -c %(cluster)s -vv\\n\"\n \"\\n\"\n \" * Widen SmartStack discovery settings\\n\"\n \" * Increase the instance count\\n\"\n \"\\n\"\n ) % {\n 'service': instance_config.service,\n 'instance': instance_config.instance,\n 'cluster': instance_config.cluster,\n }\n log.error(output)\n else:\n status = pysensu_yelp.Status.OK\n log.info(output)\n send_event(instance_config=instance_config, status=status, output=output)", "def check(key):\n instance = key.get()\n if not instance:\n logging.warning('Instance does not exist: %s', key)\n return\n\n if not instance.active_metadata_update:\n logging.warning('Instance active metadata operation unspecified: %s', key)\n return\n\n if not instance.active_metadata_update.url:\n logging.warning(\n 'Instance active metadata operation URL unspecified: %s', key)\n return\n\n now = utils.utcnow()\n result = net.json_request(\n instance.active_metadata_update.url, scopes=gce.AUTH_SCOPES)\n if result['status'] != 'DONE':\n return\n\n if result.get('error'):\n logging.warning(\n 'Instance metadata operation failed: %s\\n%s',\n key,\n json.dumps(result, indent=2),\n )\n metrics.instance_set_metadata_time.add(\n (now - instance.active_metadata_update.operation_ts).total_seconds(),\n fields={\n 'success': False,\n 'zone': instance.instance_group_manager.id(),\n },\n )\n metrics.send_machine_event('METADATA_UPDATE_FAILED', instance.hostname)\n reschedule_active_metadata_update(key, instance.active_metadata_update.url)\n metrics.send_machine_event('METADATA_UPDATE_READY', instance.hostname)\n else:\n metrics.instance_set_metadata_time.add(\n (now - instance.active_metadata_update.operation_ts).total_seconds(),\n fields={\n 'success': True,\n 'zone': instance.instance_group_manager.id(),\n },\n )\n metrics.send_machine_event('METADATA_UPDATE_SUCCEEDED', instance.hostname)\n clear_active_metadata_update(key, instance.active_metadata_update.url)", "def check_status():\n js = _get_jetstream_conn()\n i = js.compute.instances.get(session.attributes.get('instance_id'))\n if not i:\n return question(\"There was a problem. 
Please retry your command.\")\n\n status = i.state\n if session.attributes['status'] != status:\n msg = \"New instance status is {0}.\".format(status)\n if not session.attributes['public_ip'] and status == 'running':\n # Attach a floating IP to the instance\n fip = None\n fips = js.network.floating_ips()\n for ip in fips:\n if not ip.in_use():\n fip = ip\n if fip:\n i.add_floating_ip(fip.public_ip)\n session.attributes['public_ip'] = fip.public_ip\n else:\n msg = \"Instance status is {0}\".format(status)\n\n session.attributes['status'] = status\n\n if session.attributes['status'] != 'running':\n q = \"Would you like to check the status again?\"\n return question(msg + q).reprompt(q)\n else:\n card_content = 'Access your instance at http://{0}'.format(\n session.attributes.get('public_ip'))\n return statement(msg).simple_card(\n title=\"Instance {0} was launched.\".format(i.name),\n content=msg + card_content)", "def instanceha_deployed():\n if overcloud.has_overcloud():\n return get_overcloud_nodes_running_pcs_resource(\n resource='nova-evacuate')\n else:\n return False", "def test_instance_not_overscaled(self) -> None:\n if self.prod_env:\n ec2_name = 'saints-xctf-server-prod-asg'\n else:\n ec2_name = 'saints-xctf-server-dev-asg'\n\n instances = self.get_ec2(ec2_name)\n self.assertTrue(len(instances) < 3)", "def instances_availability(self, lastsubmitedinstance, metrics):\n connection = self.connection\n instancesconfig = self.instancesconfigs\n\n cur = connection.cursor()\n harvesters = instancesconfig.keys()\n connection.row_factory = sqlite3.Row\n\n for harvesterid in harvesters:\n error_text = set()\n\n instanceisenable = self.__str_to_bool(instancesconfig[harvesterid]['instanceisenable'])\n del instancesconfig[harvesterid]['instanceisenable']\n ### Instance is enable ###\n if instanceisenable:\n for host in instancesconfig[harvesterid].keys():\n avaibility = []\n if self.__str_to_bool(instancesconfig[harvesterid][host]['hostisenable']):\n ### No submitted worker ###\n timedelta_submitted = timedelta(minutes=30)\n if host != 'none' and host in instancesconfig[harvesterid] \\\n and self.__str_to_bool( instancesconfig[harvesterid][host]['metrics']['lastsubmittedworker']['enable']):\n timedelta_submitted = self.__get_timedelta(\n instancesconfig[harvesterid][host]['metrics']['lastsubmittedworker']['value'])\n if lastsubmitedinstance[harvesterid]['harvesterhost'][host][\n 'harvesterhostmaxtime'] < datetime.utcnow() - timedelta_submitted:\n error = \"Last submitted worker was {0}\".format(\n str(lastsubmitedinstance[harvesterid]['harvesterhost'][host][\n 'harvesterhostmaxtime'])) + '\\n'\n error_text.add(error)\n avaibility.append(0)\n if harvesterid in metrics:\n ### No heartbeat ###\n heartbeattime = metrics[harvesterid][host].keys()[0]\n contacts = instancesconfig[harvesterid][host]['contacts']\n timedelta_heartbeat = self.__get_timedelta(instancesconfig[harvesterid][host]['metrics']['lastheartbeat']['value'])\n if self.__str_to_bool( instancesconfig[harvesterid][host]['metrics']['lastheartbeat']['enable']) and \\\n heartbeattime < datetime.utcnow() - timedelta_heartbeat:\n error = \"Last heartbeat was {0}\".format(\n str(heartbeattime)) + '\\n'\n error_text.add(error)\n avaibility.append(0)\n\n #### Metrics ####\n memory = instancesconfig[harvesterid][host]['memory']\n cpu_warning = instancesconfig[harvesterid][host]['metrics']['cpu']['cpu_warning']\n cpu_critical = instancesconfig[harvesterid][host]['metrics']['cpu']['cpu_critical']\n disk_warning = 
instancesconfig[harvesterid][host]['metrics']['disk']['disk_warning']\n disk_critical = instancesconfig[harvesterid][host]['metrics']['disk']['disk_critical']\n memory_warning = instancesconfig[harvesterid][host]['metrics']['memory']['memory_warning']\n memory_critical = instancesconfig[harvesterid][host]['metrics']['memory']['memory_critical']\n\n cpu_enable = self.__str_to_bool(\n instancesconfig[harvesterid][host]['metrics']['cpu']['enable'])\n disk_enable = self.__str_to_bool(\n instancesconfig[harvesterid][host]['metrics']['disk']['enable'])\n memory_enable = self.__str_to_bool(\n instancesconfig[harvesterid][host]['metrics']['memory']['enable'])\n\n #### Metrics DB ####\n for metric in metrics[harvesterid][host][heartbeattime]:\n #### CPU ####\n if cpu_enable:\n cpu_pc = int(metric['cpu_pc'])\n if cpu_pc >= cpu_warning:\n avaibility.append(50)\n error = \"Warning! CPU utilization: {0}\".format(\n str(cpu_pc)) + '\\n'\n error_text.add(error)\n elif cpu_pc >= cpu_critical:\n avaibility.append(10)\n error = \"CPU utilization: {0}\".format(\n str(cpu_pc)) + '\\n'\n error_text.add(error)\n #### Memory ####\n if memory_enable:\n if 'memory_pc' in metric:\n memory_pc = int(metric['memory_pc'])\n else:\n memory_pc = int(self.__get_change(metric['rss_mib'], memory))\n if memory_pc >= memory_warning:\n avaibility.append(50)\n error = \"Warning! Memory consumption: {0}\".format(\n str(memory_pc)) + '\\n'\n error_text.add(error)\n elif memory_pc >= memory_critical:\n avaibility.append(0)\n error = \"Memory consumption: {0}\".format(\n str(memory_pc)) + '\\n'\n error_text.add(error)\n #### HDD&HDD1 ####\n if disk_enable:\n if 'volume_data_pc' in metric:\n volume_data_pc = int(metric['volume_data_pc'])\n else:\n volume_data_pc = -1\n if volume_data_pc >= disk_warning:\n avaibility.append(50)\n error = \"Warning! Disk utilization: {0}\".format(\n str(volume_data_pc)) + '\\n'\n error_text.add(error)\n elif volume_data_pc >= disk_critical:\n avaibility.append(10)\n error = \"Disk utilization: {0}\".format(\n str(volume_data_pc)) + '\\n'\n error_text.add(error)\n if 'volume_data1_pc' in metric:\n volume_data1_pc = int(metric['volume_data1_pc'])\n if volume_data1_pc >= disk_warning:\n avaibility.append(50)\n error = \"Warning! 
Disk 1 utilization: {0}\".format(\n str(volume_data1_pc)) + '\\n'\n error_text.add(error)\n elif volume_data1_pc >= disk_critical:\n avaibility.append(10)\n error = \"Disk 1 utilization: {0}\".format(\n str(volume_data1_pc)) + '\\n'\n error_text.add(error)\n try:\n cur.execute(\"insert into INSTANCES values (?,?,?,?,?,?,?,?,?)\",\n (str(harvesterid), str(host),\n str(lastsubmitedinstance[harvesterid]['harvesterhost'][host]['harvesterhostmaxtime']),\n heartbeattime, 1, 0, min(avaibility) if len(avaibility) > 0 else 100, str(contacts), ', '.join(str(e) for e in error_text)))\n connection.commit()\n error_text = set()\n except:\n query = \\\n \"\"\"UPDATE INSTANCES \n SET lastsubmitted = '{0}', active = {1}, availability = {2}, lastheartbeat = '{3}', contacts = '{4}', errorsdesc = '{5}'\n WHERE harvesterid = '{6}' and harvesterhost = '{7}'\n \"\"\".format(str(lastsubmitedinstance[harvesterid]['harvesterhost'][host]['harvesterhostmaxtime']),\n 1, min(avaibility) if len(avaibility) > 0 else 100, heartbeattime, str(contacts), ', '.join(str(e) for e in error_text), str(harvesterid),\n str(host))\n cur.execute(query)\n connection.commit()\n error_text = set()\n else:\n cur.execute(\"DELETE FROM INSTANCES WHERE harvesterid = ?\", [str(harvesterid)])\n connection.commit()", "def run():\r\n if args.action == \"create\":\r\n if args.customer_id and args.node_type:\r\n my_aws = createaws()\r\n node_id = my_aws.create_ec2_instance(args.customer_id, args.node_type)\r\n print(\"Node Created: \", node_id, \"\\n\")\r\n return True\r\n else:\r\n print(\"Missed command parameters for Instance Creation\")\r\n return False\r\n\r\n elif args.action == \"list-nodes\":\r\n if args.customer_id:\r\n my_aws = createaws()\r\n instance_lst = my_aws.get_instance_by_customer_id(args.customer_id)\r\n print(\"Customer\", args.customer_id, \"has \" + str(len(instance_lst)) + \" Instances: \", \",\".join(instance_lst)\r\n ,\"\\n\")\r\n return True\r\n else:\r\n print(\"Missed command parameters for Instance Listing\")\r\n return False\r\n\r\n elif args.action == \"list-all\":\r\n my_aws = createaws()\r\n cust_inst_ip = my_aws.get_all_instances()\r\n print(\"All the Instances: customer_id, instance_id, instance_ip formatted\\n\")\r\n if len(cust_inst_ip) > 0:\r\n for rec in cust_inst_ip:\r\n print(', '.join(rec))\r\n else:\r\n print(\"No Instances!\")\r\n return False\r\n return True\r\n\r\n elif args.action == \"execute\":\r\n instance_ids, succ_id_list, not_worked_is_list, outs = [], [], [], []\r\n if args.script and (args.customer_id or args.node_type):\r\n my_aws = createaws()\r\n commands = args.script\r\n if args.customer_id:\r\n instance_ids.extend(my_aws.get_instance_by_customer_id(args.customer_id))\r\n if args.node_type:\r\n instance_ids.extend(my_aws.get_instance_by_node_type(args.node_type))\r\n instance_ids = list(set(instance_ids))\r\n\r\n succ_id_list, not_worked_is_list, outs = \\\r\n my_aws.execute_commands_on_linux_instances(commands, instance_ids)\r\n print(\"\\nInstances that run the commands:\\n\", '\\n '.join(succ_id_list))\r\n print(\"\\nInstances that don't run the commands: (Instance is not running or its SSM agent doesn't work or \"\r\n \"command couldn't be executed\\n\", '\\n '.join(not_worked_is_list))\r\n print(\"\\nOutputs of the Instances that run the commands:\")\r\n for i in outs:\r\n print(\"\\n\")\r\n for k, v in dict(i).items():\r\n print(str(k).lstrip(), \"-->\", str(v).replace('\\n', \"\"))\r\n return True\r\n else:\r\n print(\"Missed command parameters for Execution on Instance\")\r\n 
return False\r\n\r\n elif args.action == \"backup\":\r\n if args.node_id:\r\n my_aws = createaws()\r\n s_id = my_aws.make_backup(args.node_id)\r\n print(s_id)\r\n else:\r\n return False\r\n\r\n elif args.action == \"list-backups\":\r\n if args.node_id:\r\n my_aws = createaws()\r\n backup_list = my_aws.list_backups(args.node_id)\r\n if len(backup_list) > 0:\r\n for rec in backup_list:\r\n print(', '.join(rec))\r\n return True\r\n else:\r\n print(\"Snapshot yok !\")\r\n return True\r\n else:\r\n return False\r\n\r\n elif args.action == \"roll-back\":\r\n if args.backup_id:\r\n my_aws = createaws()\r\n my_aws.roll_back(args.backup_id, args.node_id)\r\n elif args.action == \"terminate-all\":\r\n my_aws = createaws()\r\n my_aws.terminate_instances('ALL')\r\n else:\r\n print(\"Please select a proper action\")", "def allowed_instances(context, requested_instances, instance_type):\n project_id = context.project_id\n context = context.elevated()\n requested_cores = requested_instances * instance_type['vcpus']\n requested_ram = requested_instances * instance_type['memory_mb']\n usage = db.instance_data_get_for_project(context, project_id)\n used_instances, used_cores, used_ram = usage\n quota = get_project_quotas(context, project_id)\n allowed_instances = _get_request_allotment(requested_instances,\n used_instances,\n quota['instances'])\n allowed_cores = _get_request_allotment(requested_cores, used_cores,\n quota['cores'])\n allowed_ram = _get_request_allotment(requested_ram, used_ram, quota['ram'])\n allowed_instances = min(allowed_instances,\n allowed_cores // instance_type['vcpus'],\n allowed_ram // instance_type['memory_mb'])\n return min(requested_instances, allowed_instances)", "def test_create_cluster_resource_quota(self):\n pass", "def check_available(rs):\n info = rs.get_cluster_info()\n if info['ClusterStatus'] == 'available':\n return True\n elif info['ClusterStatus'] == 'deleting':\n raise AttributeError(f'Cluster is currently deleting. Please wait and try again.\\n{info}')\n elif info['ClusterStatus'] == 'creating': \n return False\n \n raise NameError(f'Could not get cluster status information. Current information about the cluster: \\n{info}')", "def test_patch_cluster_resource_quota_status(self):\n pass", "def check_autoscaling_group_health(asg_name, current_capacity_count):\n if_verbose(\"Checking the health of ASG %s\" % asg_name)\n timer = time.time()\n while(True):\n if_verbose(\"Sleeping for %d seconds whilst waiting for ASG health\" % args.update_timeout)\n time.sleep(args.update_timeout)\n\n if int(time.time() - timer) >= args.health_check_timeout:\n return \"Health check timer expired on asg_instances count. 
A manual clean up is likely.\"\n\n completed_instances = 0\n asg_instances = asg.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name], MaxRecords=1)[\"AutoScalingGroups\"][0][\"Instances\"]\n\n while(len(asg_instances) != current_capacity_count):\n if_verbose(\"Waiting for all of %s's instances (%d) to appear healthy\" % (asg_name, args.instance_count_step))\n time.sleep(args.update_timeout)\n asg_instances = asg.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name], MaxRecords=1)[\"AutoScalingGroups\"][0][\"Instances\"]\n\n for instance in asg_instances:\n if_verbose(\"Progress of ASG instance %s: %s\" % (instance[\"InstanceId\"], instance[\"LifecycleState\"]))\n\n if instance[\"LifecycleState\"] == \"InService\":\n completed_instances += 1\n\n if completed_instances >= len(asg_instances):\n if_verbose(\"We have %d healthy nodes and we wanted %d - moving on.\" % (completed_instances, len(asg_instances)))\n break\n else:\n completed_instances = 0\n\n if_verbose(\"ASG %s is healthy\" % asg_name)\n return None", "def test_replace_cluster_resource_quota_status(self):\n pass", "def test_update_instance_limit(self):\n pass", "def test_update_instance_limit1(self):\n pass", "def test_read_cluster_resource_quota_status(self):\n pass", "def check_number_of_instances(self):\r\n\r\n if RecomendationDBManagement.management_instances_created != 0:\r\n raise ValueError(\"There can only be one database manager\")\r\n else:\r\n RecomendationDBManagement.management_instances_created = RecomendationDBManagement.management_instances_created + 1", "def test_patch_cluster_resource_quota(self):\n pass", "def test_non_additive_instance_creation(self):\n\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import launch_instances, run_block_device_dict, farm_security_group_setup\n\n # launch_instances requires vpc setup as done by firesim/scripts/setup_firesim.py\n from awstools.aws_setup import aws_setup\n aws_setup()\n farm_security_group_setup()\n\n type = 'f1.2xlarge'\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags = {'fsimcluster': 'testcluster'},\n always_expand=False)\n instances.should.have.length_of(1)\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags = {'fsimcluster': 'testcluster'},\n always_expand=False)\n instances.should.have.length_of(1)\n\n # There should be one instance total now, across one reservation\n ec2_client = boto3.client('ec2')\n paginator = ec2_client.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n all_reservations.should.have.length_of(1)\n\n [i for r in all_reservations for i in r['Instances']].should.have.length_of(1)", "def dvs_instances_batch_mix_sg(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n self.show_step(2)\n net_1 = os_conn.create_network(\n 
network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network is created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(3)\n self.show_step(4)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n self.icmp[\"security_group_rule\"][\"security_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"remote_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n self.icmp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n\n for sg in [sg1, sg2]:\n self.tcp[\"security_group_rule\"][\"security_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"remote_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n self.tcp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n\n # Add rules for ssh and ping\n os_conn.goodbye_security()\n _s_groups = os_conn.neutron.list_security_groups()['security_groups']\n _srv_tenant_id = os_conn.get_tenant(SERVTEST_TENANT).id\n default_sg = [sg for sg in _s_groups\n if sg['tenant_id'] == _srv_tenant_id and\n sg['name'] == 'default'][0]\n\n for step in (5, 9):\n self.show_step(step) # step 5, 9\n step += 1\n self.show_step(step) # step 6, 10\n step += 1\n\n openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=vm_count,\n security_groups=[sg1.name, sg2.name, default_sg['name']])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(step) # step 7, 11\n step += 1\n\n instances = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, instances)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair,\n timeout=60 * 5,\n interval=10)\n\n self.show_step(step) # step 8, 12\n step += 1\n\n for instance in instances:\n os_conn.nova.servers.delete(instance)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(instance)", "def test_replace_cluster_resource_quota(self):\n pass", "def _Exists(self, instance_only: bool = False) -> bool:\n cmd = util.GcloudCommand(self, 'spanner', 'instances', 'describe',\n self.name)\n\n # Do not log error or warning when checking existence.\n _, _, retcode = cmd.Issue(suppress_warning=True, raise_on_failure=False)\n if retcode != 0:\n logging.info('Could not find GCP Spanner instance %s.', self.name)\n return False\n\n if instance_only:\n return True\n\n cmd = util.GcloudCommand(self, 'spanner', 'databases', 'describe',\n self.database)\n cmd.flags['instance'] = self.name\n\n # Do not log 
error or warning when checking existence.\n _, _, retcode = cmd.Issue(suppress_warning=True, raise_on_failure=False)\n if retcode != 0:\n logging.info('Could not find GCP Spanner database %s.', self.database)\n return False\n\n return True", "def test_redis_increase_replica_count_usual_case():", "def scale_up(nvms, valid_hostnames=None, vms_allegedly_running=0):\n\n global api, img, flavor, user_data, network, owned_instances\n\n # Try to get image if necessary\n if img is None:\n img = image(cf['api']['image_id'])\n if img is None:\n logging.error(\"Cannot scale up: image id %s not found\" % image(cf['api']['image_id']))\n return []\n\n n_succ = 0\n n_fail = 0\n logging.info(\"We need %d more VMs...\" % nvms)\n\n inst = running_instances(valid_hostnames)\n if inst is None:\n logging.error(\"No list of instances can be retrieved from API\")\n return []\n\n n_running_vms = len(inst) + vms_allegedly_running # number of *total* VMs running (also the ones *not* owned by HTCondor)\n if cf['quota']['max_vms'] >= 1:\n # We have a \"soft\" quota: respect it\n n_vms_to_start = int(min(nvms, cf['quota']['max_vms']-n_running_vms))\n if n_vms_to_start <= 0:\n logging.warning(\"Over quota (%d VMs already running out of %d): cannot launch any more VMs\" % \\\n (n_running_vms,cf['quota']['max_vms']))\n else:\n logging.warning(\"Quota enabled: requesting %d (out of desired %d) VMs\" % (n_vms_to_start,nvms))\n else:\n n_vms_to_start = int(nvms)\n\n # Launch VMs\n inst_ok = []\n for i in range(1, n_vms_to_start+1):\n\n success = False\n if int(cf['debug']['dry_run_boot_vms']) == 0:\n try:\n # Returns the reservation\n new_inst_id = img.run(\n token_id=api.keystone.token_id,\n key_name=cf['api']['key_name'],\n user_data=user_data,\n instance_type=flavor.id,\n network=network.id\n )\n\n # Get the single instance ID from the reservation\n owned_instances.append( new_inst_id )\n inst_ok.append( new_inst_id )\n\n success = True\n except Exception:\n logging.error(\"Cannot run instance via API: check your \\\"hard\\\" quota\")\n\n else:\n logging.info(\"Not running VM: dry run active\")\n success = True\n\n if success:\n n_succ+=1\n logging.info(\"VM launched OK. Requested: %d/%d | Success: %d | Failed: %d | ID: %s\" % \\\n (i, n_vms_to_start, n_succ, n_fail, new_inst_id))\n else:\n n_fail+=1\n logging.info(\"VM launch fail. 
Requested: %d/%d | Success: %d | Failed: %d\" % \\\n (i, n_vms_to_start, n_succ, n_fail))\n\n # Dump owned instances to file (if something changed)\n if n_succ > 0:\n save_owned_instances()\n\n return inst_ok", "def ec2_status(resource, metadata, return_count=False):\n\n instances = resource.instances.filter(\n Filters=[{'Name': 'tag:Name', 'Values': [metadata['fqdn']]},\n {'Name': 'instance-state-name', 'Values': ['pending', 'running']}, ])\n\n # get a count of the instances and then either return count or print results\n count = sum(1 for _ in instances)\n if return_count:\n # return count for conditional consumption in other functions\n return count\n else:\n # print for human consumption\n if count == 0:\n print(\"No instances running\")\n else:\n print(count, \"instances running\")\n print('{:20} {:15} {:22} {:18} {}'.format(\n 'instance_id', 'state', 'instance_name', 'public_ip_address', 'instance_role'))\n for instance in instances:\n # tags order does not deterministically stay from run to run and stored as list of dicts\n # tags = {instance.tags[0]['Key']: instance.tags[0]['Value'],\n # instance.tags[1]['Key']: instance.tags[1]['Value']}\n # probably there is a much better way to map this but let's make it a dict of tags\n tags = {}\n for tag in instance.tags:\n tags[tag['Key']] = tag['Value']\n\n print('{:20} {:15} {:22} {:18} {}'.format(\n instance.id, instance.state['Name'], tags['Name'],\n instance.public_ip_address, tags['Role']))", "def test_read_cluster_resource_quota(self):\n pass", "def pre_upgrade_checks(self):\n\n #HostOverview\n Logger.info(\"******************************************************************************************************************************************************\")\n Logger.info(\"\\t\\t\\t\\t\\t\\t\\tHOST OVERVIEW\")\n Logger.info(\"******************************************************************************************************************************************************\")\n print (\"\\n\")\n Logger.info(\"Ambari version\\t\\t:{0}\".format(self.ambari_version))\n\n #Check OS\n os = platform.dist()\n if os[1] != None:\n Logger.info(\"Operating System\\t\\t:{0} {1} - {2}\".format(os[0],os[1],os[2]))\n else:\n Logger.error(\"Unable to fetch OS details.\")\n self.terminate()\n return\n\n self.check_java_version()\n self.check_exactly_one_current_version()\n\n\n #Check if rack awareness is enabled ?\n rack_awareness = \"SELECT DISTINCT rack_info FROM hosts WHERE rack_info!='/default-rack';\"\n self.cursor.execute(rack_awareness)\n result = self.cursor.fetchone()\n if result is None or len(result) != 1:\n Logger.info(\"Rack Awareness ?\\t\\tNo\\n\")\n else:\n Logger.info(\"Rack Awareness ?\\t\\tYes\\n\")\n\n #Security Overview\n self.check_security()\n\n #Check High Availability configuration\n self.check_high_availability()\n\n #Check Metastores\n self.check_metastore()", "def _check_upgrade(dbapi, host_obj=None):\n is_upgrading, upgrade = is_upgrade_in_progress(dbapi)\n if is_upgrading:\n if host_obj:\n if host_obj.created_at > upgrade.created_at:\n LOG.info(\"New host %s created after upgrade, allow partition\" %\n host_obj.hostname)\n return\n\n raise wsme.exc.ClientSideError(\n _(\"ERROR: Disk partition operations are not allowed during a \"\n \"software upgrade. 
Try again after the upgrade is completed.\"))", "def test_eks_worker_node_managed_by_eks(self) -> None:\n response = self.ec2.describe_instances(Filters=[\n {\n 'Name': 'tag:Name',\n 'Values': ['eks-prod']\n }\n ])\n worker_instances = response.get('Reservations')[0].get('Instances')\n self.assertEqual(1, len(worker_instances))", "def check_owned_instance(st, instance_id):\n\n logging.info(\"Checking owned instance %s...\" % instance_id)\n\n global api, owned_instances\n\n # Get information from API: we need the IP address\n inst = api.nova.get_instance(token_id=api.keystone.token_id, instance_id=instance_id)\n\n # Check if the instance is in the list (using cached status)\n found = False\n for h in st['workers_status'].keys():\n if gethostbyname(h) == inst.network_ip(network_name=cf[\"api\"][\"network_name\"]):\n found = True\n break\n\n # Deal with errors\n if not found:\n logging.error(\"Instance %s (with IP %s) has not joined the cluster after %ds: terminating it\" % (instance_id, inst.private_ip_address, cf['elastiq']['estimated_vm_deploy_time_s']))\n\n try:\n inst.terminate(token_id=api.keystone.token_id)\n owned_instances.remove(instance_id)\n save_owned_instances()\n logging.info(\"Forcing shutdown of %s: OK\" % instance_id)\n except Exception as e:\n # Recheck in a while (10s) in case termination fails\n logging.error(\"Forcing shutdown of %s failed: rescheduling check\" % instance_id)\n return {\n 'action': 'check_owned_instance',\n 'when': time.time() + 10,\n 'params': [ instance_id ]\n }\n\n else:\n logging.debug(\"Instance %s (with IP %s) successfully joined the cluster within %ds\" % (instance_id, inst.network_ip(network_name=cf[\"api\"][\"network_name\"]), cf['elastiq']['estimated_vm_deploy_time_s']))\n\n return", "def check_load(cursor):\n cursor.execute(\"\"\"\n select pid from pg_stat_activity where query ~* 'FETCH'\n and datname = 'asos'\"\"\")\n if cursor.rowcount > 9:\n sys.stderr.write((\"/cgi-bin/request/metars.py over capacity: %s\"\n ) % (cursor.rowcount,))\n ssw(\"Content-type: text/plain\\n\")\n ssw('Status: 503 Service Unavailable\\n\\n')\n ssw(\"ERROR: server over capacity, please try later\")\n sys.exit(0)", "def test_healthcheck_galera_cluster(host):\n\n sql_query = (\"show status where Variable_name like 'wsrep_clu%'\"\n \"or Variable_name like 'wsrep_local_state%';\")\n mysql_cmd = 'mysql -h localhost -e \"{0}\"'.format(sql_query)\n\n cmd = \"{} {}\".format(galera_container, mysql_cmd)\n\n output = host.run(cmd)\n verify_items = ['wsrep_cluster_conf_id',\n 'wsrep_cluster_size',\n 'wsrep_cluster_state_uuid',\n 'wsrep_cluster_status',\n 'wsrep_local_state_uuid']\n\n for item in verify_items:\n assert item in output.stdout", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n backend_type: Optional[pulumi.Input['InstanceBackendType']] = None,\n connection_name: Optional[pulumi.Input[str]] = None,\n current_disk_size: Optional[pulumi.Input[str]] = None,\n database_version: Optional[pulumi.Input['InstanceDatabaseVersion']] = None,\n disk_encryption_configuration: Optional[pulumi.Input[pulumi.InputType['DiskEncryptionConfigurationArgs']]] = None,\n disk_encryption_status: Optional[pulumi.Input[pulumi.InputType['DiskEncryptionStatusArgs']]] = None,\n etag: Optional[pulumi.Input[str]] = None,\n failover_replica: Optional[pulumi.Input[pulumi.InputType['InstanceFailoverReplicaArgs']]] = None,\n gce_zone: Optional[pulumi.Input[str]] = None,\n instance_type: Optional[pulumi.Input['InstanceInstanceType']] = None,\n 
ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpMappingArgs']]]]] = None,\n ipv6_address: Optional[pulumi.Input[str]] = None,\n kind: Optional[pulumi.Input[str]] = None,\n maintenance_version: Optional[pulumi.Input[str]] = None,\n master_instance_name: Optional[pulumi.Input[str]] = None,\n max_disk_size: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n on_premises_configuration: Optional[pulumi.Input[pulumi.InputType['OnPremisesConfigurationArgs']]] = None,\n out_of_disk_report: Optional[pulumi.Input[pulumi.InputType['SqlOutOfDiskReportArgs']]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n replica_configuration: Optional[pulumi.Input[pulumi.InputType['ReplicaConfigurationArgs']]] = None,\n replica_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n root_password: Optional[pulumi.Input[str]] = None,\n satisfies_pzs: Optional[pulumi.Input[bool]] = None,\n scheduled_maintenance: Optional[pulumi.Input[pulumi.InputType['SqlScheduledMaintenanceArgs']]] = None,\n secondary_gce_zone: Optional[pulumi.Input[str]] = None,\n self_link: Optional[pulumi.Input[str]] = None,\n server_ca_cert: Optional[pulumi.Input[pulumi.InputType['SslCertArgs']]] = None,\n service_account_email_address: Optional[pulumi.Input[str]] = None,\n settings: Optional[pulumi.Input[pulumi.InputType['SettingsArgs']]] = None,\n state: Optional[pulumi.Input['InstanceState']] = None,\n suspension_reason: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceSuspensionReasonItem']]]] = None,\n __props__=None):\n ...", "def quick_instance(self, name, image, instance_type, env_tag='dev', zone_tag='starwatts', os_tag='debian', sg_id=None,\n private=True, extra_sg_ids=None, extra_tags=None, terminate_on_shutdown=False,\n debug=False):\n # Debug setting\n if debug:\n logging.basicConfig(level=logging.DEBUG)\n\n # Preliminary tests\n try:\n ami = self.get_image(image_id=image)\n except EC2ResponseError:\n logging.error(\"The image {} could not be found. Aborting.\".format(image))\n return\n print(\"Using AMI {} : {}\".format(image, ami.name))\n if len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'running'})) > 0 or \\\n len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'stopped'})) > 0:\n logging.error(\"An instance with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No instance has the same 'name' tag.\")\n if self.keypair_exists(name):\n logging.error(\"A keypair with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No keypair was found with the same name.\")\n if sg_id is None:\n if self.security_group_exists(name=name):\n logging.error(\"A security group with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No security group was found with the same name.\")\n\n # Tags generation\n logging.debug(\"Generating tags to apply.\")\n tags = dict(name=name, os=os_tag, env=env_tag, zone=zone_tag, privacy='true' if private else 'false')\n if extra_tags is not None:\n tags.update(extra_tags)\n print(\"Tags : {}\".format(tags))\n\n # Fetching needed security groups (bastion and zabbix)\n standard_sg = self.get_all_security_groups(groupnames=['standard'])\n if len(standard_sg) != 1:\n logging.error(\"Multiple or no security group was found for the 'bastion' search. 
Aborting.\")\n return\n standard_sg = standard_sg[0]\n logging.debug(\"The following security group was found for 'standard : {} {}\".format(standard_sg.id,\n standard_sg.description))\n\n # Security group creation\n if sg_id is None:\n sg = self.create_security_group(name, \"SG applied to {} VM\".format(name))\n sg_id = sg.id\n\n sg_ids = [sg_id, standard_sg.id, ]\n # Using the extra security groups if any\n if extra_sg_ids is not None:\n logging.debug(\"Extra security groups to add : {}\".format(extra_sg_ids))\n sg_ids.extend(extra_sg_ids)\n logging.debug(\"Security Groups : {}\".format(sg_ids))\n\n user_data = \"-----BEGIN OUTSCALE SECTION-----\\nprivate_only=true\\n-----END OUTSCALE SECTION-----\" if private else \"\"\n logging.debug(\"Creating keypair.\")\n kp = self.create_key_pair(key_name=name)\n fp = os.path.join(os.path.expanduser('~/.ssh'), '%s.pem' % kp.name)\n with open(fp, 'wb') as fd:\n fd.write(bytes(kp.material, \"UTF-8\"))\n logging.debug(\"Keypair written to ~/.ssh/{}.pem\".format(name))\n\n resa = self.run_instances(image_id=image, key_name=name, security_groups=sg_ids, instance_type=instance_type,\n user_data=user_data,\n instance_initiated_shutdown_behavior='terminate' if terminate_on_shutdown else 'stop')\n inst = resa.instances[0]\n logging.debug(\"Adding tags to the newly created machine.\")\n inst.add_tags(tags)\n return inst", "def check(self, instance):\n\n def total_seconds(td):\n \"\"\"\n Returns total seconds of a timedelta in a way that's safe for\n Python < 2.7\n \"\"\"\n if hasattr(td, 'total_seconds'):\n return td.total_seconds()\n else:\n return (\n lag.microseconds +\n (lag.seconds + lag.days * 24 * 3600) * 10**6\n ) / 10.0**6\n\n if 'server' not in instance:\n raise Exception(\"Missing 'server' in mongo config\")\n\n # x.509 authentication\n ssl_params = {\n 'ssl': instance.get('ssl', None),\n 'ssl_keyfile': instance.get('ssl_keyfile', None),\n 'ssl_certfile': instance.get('ssl_certfile', None),\n 'ssl_cert_reqs': instance.get('ssl_cert_reqs', None),\n 'ssl_ca_certs': instance.get('ssl_ca_certs', None)\n }\n\n for key, param in ssl_params.items():\n if param is None:\n del ssl_params[key]\n\n server = instance['server']\n username, password, db_name, nodelist, clean_server_name, auth_source = self._parse_uri(server, sanitize_username=bool(ssl_params))\n additional_metrics = instance.get('additional_metrics', [])\n\n # Get the list of metrics to collect\n collect_tcmalloc_metrics = 'tcmalloc' in additional_metrics\n metrics_to_collect = self._get_metrics_to_collect(\n server,\n additional_metrics\n )\n\n # Tagging\n tags = instance.get('tags', [])\n # ...de-dupe tags to avoid a memory leak\n tags = list(set(tags))\n\n if not db_name:\n self.log.info('No MongoDB database found in URI. 
Defaulting to admin.')\n db_name = 'admin'\n\n service_check_tags = [\n \"db:%s\" % db_name\n ]\n service_check_tags.extend(tags)\n\n # ...add the `server` tag to the metrics' tags only\n # (it's added in the backend for service checks)\n tags.append('server:%s' % clean_server_name)\n\n if nodelist:\n host = nodelist[0][0]\n port = nodelist[0][1]\n service_check_tags = service_check_tags + [\n \"host:%s\" % host,\n \"port:%s\" % port\n ]\n\n timeout = float(instance.get('timeout', DEFAULT_TIMEOUT)) * 1000\n try:\n cli = pymongo.mongo_client.MongoClient(\n server,\n socketTimeoutMS=timeout,\n connectTimeoutMS=timeout,\n serverSelectionTimeoutMS=timeout,\n read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED,\n **ssl_params)\n # some commands can only go against the admin DB\n admindb = cli['admin']\n db = cli[db_name]\n except Exception:\n self.service_check(\n self.SERVICE_CHECK_NAME,\n AgentCheck.CRITICAL,\n tags=service_check_tags)\n raise\n\n # Authenticate\n do_auth = True\n use_x509 = ssl_params and not password\n\n if not username:\n self.log.debug(\n u\"A username is required to authenticate to `%s`\", server\n )\n do_auth = False\n\n if do_auth:\n if auth_source:\n self.log.info(\"authSource was specified in the the server URL: using '%s' as the authentication database\", auth_source)\n self._authenticate(cli[auth_source], username, password, use_x509, clean_server_name, service_check_tags)\n else:\n self._authenticate(db, username, password, use_x509, clean_server_name, service_check_tags)\n\n try:\n status = db.command('serverStatus', tcmalloc=collect_tcmalloc_metrics)\n except Exception:\n self.service_check(\n self.SERVICE_CHECK_NAME,\n AgentCheck.CRITICAL,\n tags=service_check_tags)\n raise\n else:\n self.service_check(\n self.SERVICE_CHECK_NAME,\n AgentCheck.OK,\n tags=service_check_tags)\n\n if status['ok'] == 0:\n raise Exception(status['errmsg'].__str__())\n\n ops = db.current_op()\n status['fsyncLocked'] = 1 if ops.get('fsyncLock') else 0\n\n status['stats'] = db.command('dbstats')\n dbstats = {}\n dbstats[db_name] = {'stats': status['stats']}\n\n # Handle replica data, if any\n # See\n # http://www.mongodb.org/display/DOCS/Replica+Set+Commands#ReplicaSetCommands-replSetGetStatus # noqa\n try:\n data = {}\n dbnames = []\n\n replSet = admindb.command('replSetGetStatus')\n if replSet:\n primary = None\n current = None\n\n # need a new connection to deal with replica sets\n setname = replSet.get('set')\n cli_rs = pymongo.mongo_client.MongoClient(\n server,\n socketTimeoutMS=timeout,\n connectTimeoutMS=timeout,\n serverSelectionTimeoutMS=timeout,\n replicaset=setname,\n read_preference=pymongo.ReadPreference.NEAREST,\n **ssl_params)\n\n if do_auth:\n if auth_source:\n self._authenticate(cli_rs[auth_source], username, password, use_x509, server, service_check_tags)\n else:\n self._authenticate(cli_rs[db_name], username, password, use_x509, server, service_check_tags)\n\n # Replication set information\n replset_name = replSet['set']\n replset_state = self.get_state_name(replSet['myState']).lower()\n\n tags.extend([\n u\"replset_name:{0}\".format(replset_name),\n u\"replset_state:{0}\".format(replset_state),\n ])\n\n # Find nodes: master and current node (ourself)\n for member in replSet.get('members'):\n if member.get('self'):\n current = member\n if int(member.get('state')) == 1:\n primary = member\n\n # Compute a lag time\n if current is not None and primary is not None:\n if 'optimeDate' in primary and 'optimeDate' in current:\n lag = primary['optimeDate'] - 
current['optimeDate']\n data['replicationLag'] = total_seconds(lag)\n\n if current is not None:\n data['health'] = current['health']\n\n data['state'] = replSet['myState']\n\n if current is not None:\n total = 0.0\n cfg = cli_rs['local']['system.replset'].find_one()\n for member in cfg.get('members'):\n total += member.get('votes', 1)\n if member['_id'] == current['_id']:\n data['votes'] = member.get('votes', 1)\n data['voteFraction'] = data['votes'] / total\n\n status['replSet'] = data\n\n # Submit events\n self._report_replica_set_state(\n data['state'], clean_server_name, replset_name, self.agentConfig\n )\n\n except Exception as e:\n if \"OperationFailure\" in repr(e) and \"not running with --replSet\" in str(e):\n pass\n else:\n raise e\n\n # If these keys exist, remove them for now as they cannot be serialized\n try:\n status['backgroundFlushing'].pop('last_finished')\n except KeyError:\n pass\n try:\n status.pop('localTime')\n except KeyError:\n pass\n\n dbnames = cli.database_names()\n self.gauge('mongodb.dbs', len(dbnames), tags=tags)\n\n for db_n in dbnames:\n db_aux = cli[db_n]\n dbstats[db_n] = {'stats': db_aux.command('dbstats')}\n\n # Go through the metrics and save the values\n for metric_name in metrics_to_collect:\n # each metric is of the form: x.y.z with z optional\n # and can be found at status[x][y][z]\n value = status\n\n if metric_name.startswith('stats'):\n continue\n else:\n try:\n for c in metric_name.split(\".\"):\n value = value[c]\n except KeyError:\n continue\n\n # value is now status[x][y][z]\n if not isinstance(value, (int, long, float)):\n raise TypeError(\n u\"{0} value is a {1}, it should be an int, a float or a long instead.\"\n .format(metric_name, type(value)))\n\n # Submit the metric\n submit_method, metric_name_alias = self._resolve_metric(metric_name, metrics_to_collect)\n submit_method(self, metric_name_alias, value, tags=tags)\n\n for st, value in dbstats.iteritems():\n for metric_name in metrics_to_collect:\n if not metric_name.startswith('stats.'):\n continue\n\n try:\n val = value['stats'][metric_name.split('.')[1]]\n except KeyError:\n continue\n\n # value is now status[x][y][z]\n if not isinstance(val, (int, long, float)):\n raise TypeError(\n u\"{0} value is a {1}, it should be an int, a float or a long instead.\"\n .format(metric_name, type(val))\n )\n\n # Submit the metric\n metrics_tags = (\n tags +\n [\n u\"cluster:db:{0}\".format(st), # FIXME 6.0 - keep for backward compatibility\n u\"db:{0}\".format(st),\n ]\n )\n\n submit_method, metric_name_alias = \\\n self._resolve_metric(metric_name, metrics_to_collect)\n submit_method(self, metric_name_alias, val, tags=metrics_tags)\n\n if _is_affirmative(instance.get('collections_indexes_stats')):\n mongo_version = cli.server_info().get('version', '0.0')\n if LooseVersion(mongo_version) >= LooseVersion(\"3.2\"):\n self._collect_indexes_stats(instance, db, tags)\n else:\n self.log.error(\"'collections_indexes_stats' is only available starting from mongo 3.2: your mongo version is %s\", mongo_version)\n\n # Report the usage metrics for dbs/collections\n if 'top' in additional_metrics:\n try:\n dbtop = db.command('top')\n for ns, ns_metrics in dbtop['totals'].iteritems():\n if \".\" not in ns:\n continue\n\n # configure tags for db name and collection name\n dbname, collname = ns.split(\".\", 1)\n ns_tags = tags + [\"db:%s\" % dbname, \"collection:%s\" % collname]\n\n # iterate over DBTOP metrics\n for m in self.TOP_METRICS:\n # each metric is of the form: x.y.z with z optional\n # and can be found 
at ns_metrics[x][y][z]\n value = ns_metrics\n try:\n for c in m.split(\".\"):\n value = value[c]\n except Exception:\n continue\n\n # value is now status[x][y][z]\n if not isinstance(value, (int, long, float)):\n raise TypeError(\n u\"{0} value is a {1}, it should be an int, a float or a long instead.\"\n .format(m, type(value))\n )\n\n # Submit the metric\n submit_method, metric_name_alias = \\\n self._resolve_metric(m, metrics_to_collect, prefix=\"usage\")\n submit_method(self, metric_name_alias, value, tags=ns_tags)\n except Exception as e:\n self.log.warning('Failed to record `top` metrics %s' % str(e))\n\n\n if 'local' in dbnames: # it might not be if we are connectiing through mongos\n # Fetch information analogous to Mongo's db.getReplicationInfo()\n localdb = cli['local']\n\n oplog_data = {}\n\n for ol_collection_name in (\"oplog.rs\", \"oplog.$main\"):\n ol_options = localdb[ol_collection_name].options()\n if ol_options:\n break\n\n if ol_options:\n try:\n oplog_data['logSizeMB'] = round(\n ol_options['size'] / 2.0 ** 20, 2\n )\n\n oplog = localdb[ol_collection_name]\n\n oplog_data['usedSizeMB'] = round(\n localdb.command(\"collstats\", ol_collection_name)['size'] / 2.0 ** 20, 2\n )\n\n op_asc_cursor = oplog.find({\"ts\": {\"$exists\": 1}}).sort(\"$natural\", pymongo.ASCENDING).limit(1)\n op_dsc_cursor = oplog.find({\"ts\": {\"$exists\": 1}}).sort(\"$natural\", pymongo.DESCENDING).limit(1)\n\n try:\n first_timestamp = op_asc_cursor[0]['ts'].as_datetime()\n last_timestamp = op_dsc_cursor[0]['ts'].as_datetime()\n oplog_data['timeDiff'] = total_seconds(last_timestamp - first_timestamp)\n except (IndexError, KeyError):\n # if the oplog collection doesn't have any entries\n # if an object in the collection doesn't have a ts value, we ignore it\n pass\n except KeyError:\n # encountered an error trying to access options.size for the oplog collection\n self.log.warning(u\"Failed to record `ReplicationInfo` metrics.\")\n\n for (m, value) in oplog_data.iteritems():\n submit_method, metric_name_alias = \\\n self._resolve_metric('oplog.%s' % m, metrics_to_collect)\n submit_method(self, metric_name_alias, value, tags=tags)\n\n else:\n self.log.debug('\"local\" database not in dbnames. 
Not collecting ReplicationInfo metrics')\n\n # get collection level stats\n try:\n # Ensure that you're on the right db\n db = cli[db_name]\n # grab the collections from the configutation\n coll_names = instance.get('collections', [])\n # loop through the collections\n for coll_name in coll_names:\n # grab the stats from the collection\n stats = db.command(\"collstats\", coll_name)\n # loop through the metrics\n for m in self.collection_metrics_names:\n coll_tags = tags + [\"db:%s\" % db_name, \"collection:%s\" % coll_name]\n value = stats.get(m, None)\n if not value:\n continue\n\n # if it's the index sizes, then it's a dict.\n if m == 'indexSizes':\n submit_method, metric_name_alias = \\\n self._resolve_metric('collection.%s' % m, self.COLLECTION_METRICS)\n # loop through the indexes\n for (idx, val) in value.iteritems():\n # we tag the index\n idx_tags = coll_tags + [\"index:%s\" % idx]\n submit_method(self, metric_name_alias, val, tags=idx_tags)\n else:\n submit_method, metric_name_alias = \\\n self._resolve_metric('collection.%s' % m, self.COLLECTION_METRICS)\n submit_method(self, metric_name_alias, value, tags=coll_tags)\n except Exception as e:\n self.log.warning(u\"Failed to record `collection` metrics.\")\n self.log.exception(e)", "def reconfiguration_scalability(self):\n\n self.check_run('reconfiguration_scalability')\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"reconfigure_nova_ephemeral_disk\")\n\n self.show_step(2)\n cluster_id = self.fuel_web.get_last_created_cluster()\n config = utils.get_config_template('nova_disk')\n structured_config_nova = get_structured_config_dict(config)\n config = utils.get_config_template('keystone')\n structured_config_keystone = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n cluster_id,\n role='controller')\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n\n self.show_step(3)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role='controller')\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(4)\n self.check_config_on_remote(controllers, structured_config_keystone)\n\n self.show_step(5)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n time_expiration = config[\n 'keystone_config']['token/expiration']['value']\n self.check_token_expiration(os_conn, time_expiration)\n\n self.show_step(6)\n bs_nodes = [x for x in self.env.d_env.get_nodes()\n if x.name == 'slave-05' or x.name == 'slave-06']\n self.env.bootstrap_nodes(bs_nodes)\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-05': ['compute', 'cinder']})\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-06': ['controller']})\n\n self.show_step(7)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(8)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(9)\n self.fuel_web.run_ostf(cluster_id=cluster_id)\n\n self.show_step(10)\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['compute'])\n target_controller = [x for x in controllers\n if 'slave-06' in x['name']]\n target_compute = [x for x in computes\n if 'slave-05' in x['name']]\n self.check_config_on_remote(target_controller,\n structured_config_keystone)\n\n self.show_step(11)\n self.check_config_on_remote(target_compute, structured_config_nova)\n\n self.show_step(12)\n self.show_step(13)\n 
self.show_step(14)\n self.show_step(15)\n self.show_step(16)\n\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n hypervisor_name = target_compute[0]['fqdn']\n self.check_nova_ephemeral_disk(os_conn, cluster_id,\n hypervisor_name=hypervisor_name)\n\n self.show_step(17)\n self.check_token_expiration(os_conn, time_expiration)\n\n self.env.make_snapshot(\"reconfiguration_scalability\", is_make=True)", "def scale(options):\n\n # ONLY GCE is supported for scaling at this time\n cluster = gce_cluster_control(options)\n if options.test_k8s:\n k8s = k8s_control_test(options)\n else:\n k8s = k8s_control(options)\n\n slack_logger.addHandler(slack_handler(options.slack_token))\n if not options.slack_token:\n scale_logger.info(\n \"No message will be sent to slack, since there is no token provided\")\n\n scale_logger.info(\"Scaling on cluster %s\", k8s.get_cluster_name())\n\n nodes = [] # a list of nodes that are NOT critical\n for node in k8s.nodes:\n if node.metadata.name not in k8s.critical_node_names:\n nodes.append(node)\n\n # Shuffle the node list so that when there are multiple nodes\n # with same number of pods, they will be randomly picked to\n # be made unschedulable\n random.shuffle(nodes)\n\n # goal is the total number of nodes we want in the cluster\n goal = schedule_goal(k8s, options)\n\n scale_logger.info(\"Total nodes in the cluster: %i\", len(k8s.nodes))\n scale_logger.info(\n \"%i nodes are unschedulable at this time\", k8s.get_num_schedulable())\n scale_logger.info(\"Found %i critical nodes\",\n len(k8s.nodes) - len(nodes))\n scale_logger.info(\"Recommending total %i nodes for service\", goal)\n\n if confirm((\"Updating unschedulable flags to ensure %i nodes are unschedulable\" % max(len(k8s.nodes) - goal, 0))):\n update_unschedulable(max(len(k8s.nodes) - goal, 0), nodes, k8s)\n\n if goal > len(k8s.nodes):\n scale_logger.info(\n \"Resize the cluster to %i nodes to satisfy the demand\", goal)\n if options.test_cloud:\n resize_for_new_nodes_test(goal, k8s, cluster)\n else:\n slack_logger.info(\n \"Cluster resized to %i nodes to satisfy the demand\", goal)\n resize_for_new_nodes(goal, k8s, cluster)\n if options.test_cloud:\n shutdown_empty_nodes_test(nodes, k8s, cluster)\n else:\n # CRITICAL NODES SHOULD NOT BE SHUTDOWN\n shutdown_empty_nodes(nodes, k8s, cluster)", "def test_additive_instance_creation(self):\n\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import launch_instances, run_block_device_dict, farm_security_group_setup\n\n # launch_instances requires vpc setup as done by firesim/scripts/setup_firesim.py\n from awstools.aws_setup import aws_setup\n aws_setup()\n farm_security_group_setup()\n\n type = 'f1.2xlarge'\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n always_expand=True)\n instances.should.have.length_of(1)\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n always_expand=True)\n instances.should.have.length_of(1)\n\n # There should be two instances total now, across two reservations\n ec2_client = boto3.client('ec2')\n\n paginator = ec2_client.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n\n all_reservations = []\n for page in page_iterator:\n 
page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n all_reservations.should.have.length_of(2)\n\n [i for r in all_reservations for i in r['Instances']].should.have.length_of(2)", "def check_if_cluster_was_upgraded():\n return True if \"replaces\" in get_ocs_csv().get().get(\"spec\") else False", "def dvs_instances_one_group(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(2)\n self.show_step(3)\n openstack.create_instances(os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n vm_count=vm_count,\n security_groups=[security_group.name])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(4)\n srv_list = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, srv_list)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair)\n\n self.show_step(5)\n for srv in srv_list:\n os_conn.nova.servers.delete(srv)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(srv)", "def test_list_cluster_resource_quota(self):\n pass", "def modify_rds_security_group(payload):\n # version = payload.get(\"version\")\n rds_ids = payload.pop(\"resource_id\")\n sg_ids = payload.pop(\"sg_id\")\n apply_action = \"GrantSecurityGroup\"\n remove_action = \"RemoveSecurityGroup\"\n check_instance_security_action = \"DescribeSecurityGroupByInstance\"\n version = payload.get(\"version\")\n result_data = {}\n\n succ, resp = get_ha_rds_backend_instance_info(payload)\n if not succ:\n return resp\n rds_2_instance = {rds: instance for instance, rds in resp.items()}\n\n if len(sg_ids) > 1:\n return console_response(\n SecurityErrorCode.ONE_SECURITY_PER_INSTANCE_ERROR, \"modify failed\")\n sg_id = sg_ids[0]\n\n code = 0\n msg = 'Success'\n for rds_id in rds_ids:\n sg_results_succ = []\n sg = RdsSecurityGroupModel.get_security_by_id(sg_id=sg_id)\n sg_uuid = sg.uuid\n visible_rds_record = RdsModel.get_rds_by_id(rds_id=rds_id)\n if visible_rds_record.rds_type == 'ha':\n rds_group = visible_rds_record.rds_group\n rds_records = RdsModel.get_rds_records_by_group(rds_group)\n else:\n rds_records = []\n for rds_record in rds_records:\n rds_ins_uuid = rds_2_instance.get(rds_record.uuid)\n\n payload.update(\n {\"action\": check_instance_security_action, \"version\": version,\n \"server\": rds_ins_uuid})\n # check_resp = api.get(payload=payload, timeout=10)\n check_resp = api.get(payload=payload)\n if check_resp.get(\"code\") != 0:\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = check_resp.get(\"msg\")\n continue\n\n # if the instance already has a security group, remove it\n if check_resp[\"data\"][\"total_count\"] > 0:\n old_sg_uuid = check_resp[\"data\"][\"ret_set\"][0][\"id\"]\n payload.update({\"action\": remove_action, \"version\": version,\n \"server\": 
rds_ins_uuid,\n \"security_group\": old_sg_uuid})\n # remove_resp = api.get(payload=payload, timeout=10)\n remove_resp = api.get(payload=payload)\n if remove_resp.get(\"code\") != 0:\n logger.debug(\"the resp of removing the old securty group is:\" +\n str(remove_resp))\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = remove_resp.get(\"msg\")\n continue\n else:\n rds_record.sg = None\n rds_record.save()\n\n # grant the new security group to the instance\n payload.update({\"action\": apply_action, \"version\": version,\n \"server\": rds_ins_uuid, \"security_group\": sg_uuid})\n # grant_resp = api.get(payload=payload, timeout=10)\n grant_resp = api.get(payload=payload)\n\n if grant_resp.get(\"code\") != 0:\n logger.debug(\"the resp of granting the new securty group is:\" +\n str(grant_resp))\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = grant_resp.get(\"msg\")\n logger.error(\n \"security_group with sg_id \" + sg_id +\n \" cannot apply to rds with rds_id \" + rds_id)\n else:\n try:\n rds_record.sg = RdsSecurityGroupModel.\\\n get_security_by_id(sg_id)\n rds_record.save()\n except Exception as exp:\n logger.error(\"cannot save grant sg to rds to db, {}\".\n format(exp.message))\n else:\n sg_results_succ.append(sg_id)\n result_data.update({rds_id: sg_results_succ})\n resp = console_response(code, msg, len(result_data.keys()), [result_data])\n return resp", "def test_instance_too_small_aws():\n args = argparse.Namespace(cfg=os.path.join(TEST_DATA_DIR, 'instance-too-small-aws.ini'))\n with pytest.raises(UserReportError) as err:\n cfg = ElasticBlastConfig(configure(args), task = ElbCommand.SUBMIT)\n cfg.validate()\n assert err.value.returncode == INPUT_ERROR\n print(err.value.message)\n assert 'does not have enough memory' in err.value.message", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n availability_zone: Optional[pulumi.Input[str]] = None,\n create_sample_data: Optional[pulumi.Input[bool]] = None,\n db_instance_category: Optional[pulumi.Input[str]] = None,\n db_instance_class: Optional[pulumi.Input[str]] = None,\n db_instance_mode: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n encryption_key: Optional[pulumi.Input[str]] = None,\n encryption_type: Optional[pulumi.Input[str]] = None,\n engine: Optional[pulumi.Input[str]] = None,\n engine_version: Optional[pulumi.Input[str]] = None,\n instance_charge_type: Optional[pulumi.Input[str]] = None,\n instance_group_count: Optional[pulumi.Input[int]] = None,\n instance_network_type: Optional[pulumi.Input[str]] = None,\n instance_spec: Optional[pulumi.Input[str]] = None,\n ip_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InstanceIpWhitelistArgs']]]]] = None,\n maintain_end_time: Optional[pulumi.Input[str]] = None,\n maintain_start_time: Optional[pulumi.Input[str]] = None,\n master_node_num: Optional[pulumi.Input[int]] = None,\n payment_type: Optional[pulumi.Input[str]] = None,\n period: Optional[pulumi.Input[str]] = None,\n private_ip_address: Optional[pulumi.Input[str]] = None,\n resource_group_id: Optional[pulumi.Input[str]] = None,\n security_ip_lists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n seg_node_num: Optional[pulumi.Input[int]] = None,\n seg_storage_type: Optional[pulumi.Input[str]] = None,\n ssl_enabled: Optional[pulumi.Input[int]] = None,\n storage_size: Optional[pulumi.Input[int]] = None,\n tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n used_time: Optional[pulumi.Input[str]] = 
None,\n vector_configuration_status: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n vswitch_id: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def reserved_compare(options):\n running_instances = defaultdict(dict)\n reserved_purchases = defaultdict(dict)\n regions = boto.ec2.regions()\n good_regions = [r for r in regions if r.name not in ['us-gov-west-1',\n 'cn-north-1']]\n for region in good_regions:\n if options.trace:\n print \" Scanning region {0}\".format(region.name)\n conn = region.connect()\n filters = {'instance-state-name': 'running'}\n zones = defaultdict(dict)\n\n if options.trace:\n print \" Fetching running instances\"\n reservations = conn.get_all_instances(filters=filters)\n for reservation in reservations:\n for inst in reservation.instances:\n if options.debug:\n print instance_string(inst, options, verbose=True)\n if inst.state != 'running':\n if options.debug:\n print \"Skip {0.id} state {0.state}\".format(inst)\n continue\n if inst.spot_instance_request_id:\n if options.debug:\n print \"Skip {0.id} has spot id {0.spot_instance_request_id}\".format(inst)\n continue\n if 'aws:autoscaling:groupName' in inst.tags:\n if options.debug:\n print \"Skip {0.id} is an autoscale instance\".format(inst)\n continue\n if inst.platform == 'Windows' or inst.platform == 'windows':\n if options.debug:\n print \"Skip {0.id} has platform {0.platform}\".format(inst)\n continue\n if inst.instance_type not in zones[inst.placement]:\n zones[inst.placement][inst.instance_type] = []\n zones[inst.placement][inst.instance_type].append(inst)\n\n if zones:\n running_instances[region.name] = zones\n\n purchased = defaultdict(dict)\n if options.trace:\n print \" Fetching reservations\"\n\n reserved = conn.get_all_reserved_instances()\n for r in reserved:\n if options.debug:\n print reservation_string(r, verbose=True)\n if r.state != 'active':\n continue\n if r.instance_tenancy != 'default':\n print 'WARNING: Non-default tenancy %s: %s' % (r.instance_tenancy, reservation_string(r))\n continue\n if r.instance_type not in purchased[r.availability_zone]:\n purchased[r.availability_zone][r.instance_type] = [r]\n else:\n purchased[r.availability_zone][r.instance_type].append(r)\n\n if purchased:\n reserved_purchases[region.name] = purchased\n\n return check_reservation_use(options, running_instances,\n reserved_purchases)", "def check_available():\n\n rm = current_app.config['rm_object']\n\n return rm.check_availability()", "def xtest_instance_api_negative(self):\n\n # Test creating a db instance.\n LOG.info(\"* Creating db instance\")\n body = r\"\"\"\n {\"instance\": {\n \"name\": \"dbapi_test\",\n \"flavorRef\": \"medium\",\n \"port\": \"3306\",\n \"dbtype\": {\n \"name\": \"mysql\",\n \"version\": \"5.5\"\n }\n }\n }\"\"\"\n\n req = httplib2.Http(\".cache\")\n resp, content = req.request(API_URL + \"instances\", \"POST\", body, AUTH_HEADER)\n LOG.debug(content)\n content = json.loads(content)\n LOG.debug(resp)\n LOG.debug(content)\n\n self.instance_id = content['instance']['id']\n LOG.debug(\"Instance ID: %s\" % self.instance_id)\n\n # Assert 1) that the request was accepted and 2) that the response\n # is in the expected format.\n self.assertEqual(201, resp.status)\n self.assertTrue(content.has_key('instance'))\n\n\n # Test creating an instance without a body in the request.\n LOG.info(\"* Creating an instance without a body\")\n resp, content = req.request(API_URL + \"instances\", \"POST\", \"\", 
AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n\n # Test creating an instance with a malformed body.\n LOG.info(\"* Creating an instance with a malformed body\")\n resp, content = req.request(API_URL + \"instances\", \"POST\", r\"\"\"{\"instance\": {}}\"\"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(500, resp.status)\n \n # Test listing all db instances with a body in the request.\n LOG.info(\"* Listing all db instances with a body\")\n resp, content = req.request(API_URL + \"instances\", \"GET\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n \n # Test getting a specific db instance with a body in the request.\n LOG.info(\"* Getting instance %s with a body in the request\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id, \"GET\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n\n # Test getting a non-existent db instance.\n LOG.info(\"* Getting dummy instance\")\n resp, content = req.request(API_URL + \"instances/dummy\", \"GET\", \"\", AUTH_HEADER)\n content = json.loads(content)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n\n # Test immediately resetting the password on a db instance with a body in the request.\n LOG.info(\"* Resetting password on instance %s with a body in the request\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id + \"/resetpassword\", \"POST\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n \n\n # Test resetting the password on a db instance for a non-existent instance\n LOG.info(\"* Resetting password on dummy instance\")\n resp, content = req.request(API_URL + \"instances/dummy/resetpassword\", \"POST\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n \n # Test restarting a db instance for a non-existent instance\n LOG.info(\"* Restarting dummy instance\")\n resp, content = req.request(API_URL + \"instances/dummy/restart\", \"POST\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n\n # Test immediately restarting a db instance with a body in the request.\n LOG.info(\"* Restarting instance %s\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id + \"/restart\", \"POST\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status) \n\n # Test deleting an instance with a body in the request.\n LOG.info(\"* Testing delete of instance %s with a body in the request\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id, \"DELETE\", body, AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)\n\n # Test that trying to delete an already deleted instance returns\n # the proper error code.\n LOG.info(\"* Testing re-delete of instance %s\" % self.instance_id)\n resp, content = req.request(API_URL + \"instances/\" + self.instance_id, \"DELETE\", \"\", AUTH_HEADER)\n LOG.debug(resp)\n LOG.debug(content)\n self.assertEqual(404, resp.status)", "def create(self):\n raise WufooException(\"InstanceResource creation not supported\")", "def reconfigure_nova_quota(self):\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"basic_env_for_reconfiguration\")\n\n cluster_id = 
self.fuel_web.get_last_created_cluster()\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n\n self.show_step(2)\n config = utils.get_config_template('nova_quota')\n structured_config = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n cluster_id,\n role=\"controller\")\n\n self.show_step(3)\n uptimes = self.get_service_uptime(controllers, 'nova-api')\n\n self.show_step(4)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role=\"controller\")\n\n self.show_step(5)\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(6)\n self.check_service_was_restarted(controllers, uptimes, 'nova-api')\n\n self.show_step(7)\n self.check_config_on_remote(controllers, structured_config)\n\n self.show_step(8)\n self.show_step(9)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n\n self.check_nova_quota(os_conn, cluster_id)\n\n self.env.make_snapshot(\"reconfigure_nova_quota\")", "def create_cluster(rs):\n\n rs.create_cluster(verbose=False)\n print('Creating cluster. Will check every 30 seconds for completed creation.')\n cluster_built = False\n while not cluster_built:\n print('Sleeping 30 seconds.')\n time.sleep(30)\n cluster_built = check_available(rs)", "def check_availability(self):\n pass", "def replace_with_insufficient_replicas_test(self):\n debug(\"Starting cluster with 3 nodes.\")\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n if DISABLE_VNODES:\n num_tokens = 1\n else:\n # a little hacky but grep_log returns the whole line...\n num_tokens = int(node3.get_conf_option('num_tokens'))\n\n debug(\"testing with num_tokens: {}\".format(num_tokens))\n\n debug(\"Inserting Data...\")\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=2)'])\n\n # stop node to replace\n debug(\"Stopping node to replace.\")\n node3.stop(wait_other_notice=True)\n\n # stop other replica\n debug(\"Stopping node2 (other replica)\")\n node2.stop(wait_other_notice=True)\n\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n cluster.add(node4, False)\n node4.start(replace_address='127.0.0.3', wait_for_binary_proto=False, wait_other_notice=False)\n\n # replace should fail due to insufficient replicas\n node4.watch_log_for(\"Unable to find sufficient sources for streaming range\")\n assert_not_running(node4)", "def check_elb_instance_health(elb_name, instances):\n if_verbose(\"Checking ELB %s instance health for %s\" % (elb_name, instances))\n timer = time.time()\n while (True):\n if_verbose(\"Sleeping for %d ELB instance health\" % args.update_timeout)\n time.sleep(args.update_timeout)\n\n if int(time.time() - timer) >= args.health_check_timeout:\n return \"Health check timer expired. 
A manual clean up is likely.\"\n\n healthy_elb_instances = 0\n elb_instances = elb.describe_instance_health(LoadBalancerName=elb_name, Instances=instances)\n for instance in elb_instances[\"InstanceStates\"]:\n if_verbose(\"Progress of ELB instance %s: %s\" % (instance[\"InstanceId\"], instance[\"State\"]))\n\n if instance[\"State\"] == \"InService\":\n healthy_elb_instances += 1\n\n if healthy_elb_instances == len(instances):\n break\n else:\n healthy_elb_instances = 0\n\n if_verbose(\"ELB %s is healthy with instances %s\" % (elb_name, elb_instances))\n return None", "def scale_up_autoscaling_group(asg_name, instance_count):\n if_verbose(\"Scaling up ASG %s to %d instances\" % (asg_name, instance_count))\n asg.set_desired_capacity(AutoScalingGroupName=asg_name, DesiredCapacity=instance_count)\n \n activities = []\n timer = time.time()\n while(True):\n if_verbose(\"Sleeping for %d seconds whilst waiting for activities to come active\" % args.update_timeout)\n time.sleep(args.update_timeout)\n\n if int(time.time() - timer) >= args.health_check_timeout:\n return \"Health check timer expired on activities listing. A manual clean up is likely.\"\n\n activities = asg.describe_scaling_activities(AutoScalingGroupName=asg_name, MaxRecords=args.instance_count_step) \n \n if len(activities[\"Activities\"]) == args.instance_count_step:\n break\n\n activity_ids = [a[\"ActivityId\"] for a in activities[\"Activities\"]]\n\n if not len(activity_ids) > 0:\n return \"No activities found\" \n \n if_verbose(\"Activities found, checking them until complete or %ds timer expires\" % args.health_check_timeout)\n timer = time.time()\n while(True):\n if_verbose(\"Sleeping for %d seconds whilst waiting for activities to complete\" % args.update_timeout)\n time.sleep(args.update_timeout)\n \n if int(time.time() - timer) >= args.health_check_timeout:\n return \"Health check timer expired on activities check. 
A manual clean up is likely.\"\n\n completed_activities = 0\n activity_statuses = asg.describe_scaling_activities(ActivityIds=activity_ids, AutoScalingGroupName=asg_name, MaxRecords=args.instance_count_step)\n for activity in activity_statuses[\"Activities\"]:\n if_verbose(\"Progress of activity ID %s: %d\" % (activity[\"ActivityId\"], activity[\"Progress\"]))\n\n if activity[\"Progress\"] == 100:\n completed_activities += 1\n\n if completed_activities >= args.instance_count_step:\n break\n else:\n completed_activities = 0\n\n if_verbose(\"Scaling up of ASG %s successful\" % asg_name)\n return None", "def test_create_instance_with_oversubscribed_ram_fail(self):\n self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)\n self.rt.update_available_resource(self.context.elevated(), NODENAME)\n\n # get total memory as reported by virt driver:\n resources = self.compute.driver.get_available_resource(NODENAME)\n total_mem_mb = resources['memory_mb']\n\n oversub_limit_mb = total_mem_mb * 1.5\n instance_mb = int(total_mem_mb * 1.55)\n\n # build an instance, specifying an amount of memory that exceeds\n # both total_mem_mb and the oversubscribed limit:\n params = {\"flavor\": {\"memory_mb\": instance_mb, \"root_gb\": 128,\n \"ephemeral_gb\": 128}}\n instance = self._create_fake_instance_obj(params)\n\n filter_properties = {'limits': {'memory_mb': oversub_limit_mb}}\n\n self.compute.build_and_run_instance(self.context, instance,\n {}, {}, filter_properties, [],\n block_device_mapping=[])", "def wait_for_instances(client, asg, desired_state=None, desired_health=None,\n desired_count=None):\n for i in range(61):\n if i == 60:\n raise Exception('Tried for 5 minutes, giving up.')\n sleep(10)\n _asg = client.describe_auto_scaling_groups(\n AutoScalingGroupNames=[asg['AutoScalingGroupName']],\n )['AutoScalingGroups'][0]\n\n if(\n desired_count is not None and\n len(_asg['Instances']) < desired_count\n ):\n continue\n\n # Check instance states\n all_matching = True\n for instance in _asg['Instances']:\n if(\n desired_state is not None and\n instance['LifecycleState'] != desired_state\n ):\n all_matching = False\n break\n if(\n desired_health is not None and\n instance['HealthStatus'] != desired_health\n ):\n all_matching = False\n break\n if all_matching:\n break", "def is_asg_scaled(asg_name, desired_capacity):\n logger.info('Checking asg {} instance count...'.format(asg_name))\n response = client.describe_auto_scaling_groups(\n AutoScalingGroupNames=[asg_name], MaxRecords=1\n )\n actual_instances = response['AutoScalingGroups'][0]['Instances']\n if len(actual_instances) != desired_capacity:\n logger.info('Asg {} does not have enough running instances to proceed'.format(asg_name))\n logger.info('Actual instances: {} Desired instances: {}'.format(\n len(actual_instances),\n desired_capacity)\n )\n is_scaled = False\n else:\n logger.info('Asg {} scaled OK'.format(asg_name))\n logger.info('Actual instances: {} Desired instances: {}'.format(\n len(actual_instances),\n desired_capacity)\n )\n is_scaled = True\n return is_scaled", "def test_live_migration_src_check_compute_node_not_alive(self):\n instance_id = self._create_instance()\n i_ref = db.instance_get(self.context, instance_id)\n t = utils.utcnow() - datetime.timedelta(10)\n s_ref = self._create_compute_service(created_at=t, updated_at=t,\n host=i_ref['host'])\n\n self.assertRaises(exception.ComputeServiceUnavailable,\n self.scheduler.driver._live_migration_src_check,\n self.context, i_ref)\n\n db.instance_destroy(self.context, instance_id)\n 
db.service_destroy(self.context, s_ref['id'])", "def check_pacemaker_resource(self, resource_name, role, is_ha=True):\n n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n self.cluster_id, [role])\n d_ctrls = self.fuel_web.get_devops_nodes_by_nailgun_nodes(n_ctrls)\n pcm_nodes = ' '.join(self.fuel_web.get_pcm_nodes(\n d_ctrls[0].name, pure=True)['Online'])\n logger.info(\"pacemaker nodes are {0}\".format(pcm_nodes))\n resource_nodes = self.fuel_web.get_pacemaker_resource_location(\n d_ctrls[0].name, \"{}\".format(resource_name))\n if is_ha:\n for resource_node in resource_nodes:\n logger.info(\"Check resource [{0}] on node {1}\".format(\n resource_name, resource_node.name))\n config = self.fuel_web.get_pacemaker_config(resource_node.name)\n asserts.assert_not_equal(\n re.search(\n \"Clone Set: clone_{0} \\[{0}\\]\\s+Started: \\[ {1} \\]\".\n format(resource_name, pcm_nodes), config), None,\n 'Resource [{0}] is not properly configured'.format(\n resource_name))\n else:\n asserts.assert_true(len(resource_nodes), 1)\n config = self.fuel_web.get_pacemaker_config(resource_nodes[0].name)\n logger.info(\"Check resource [{0}] on node {1}\".format(\n resource_name, resource_nodes[0].name))\n asserts.assert_not_equal(\n re.search(\"{0}\\s+\\(ocf::fuel:{1}\\):\\s+Started\".format(\n resource_name, resource_name.split(\"_\")[1]), config), None,\n 'Resource [{0}] is not properly configured'.format(\n resource_name))", "def check_service_replication(\n instance_config,\n all_tasks,\n smartstack_replication_checker,\n):\n expected_count = instance_config.get_instances()\n log.info(\"Expecting %d total tasks for %s\" % (expected_count, instance_config.job_id))\n proxy_port = marathon_tools.get_proxy_port_for_instance(\n name=instance_config.service,\n instance=instance_config.instance,\n cluster=instance_config.cluster,\n soa_dir=instance_config.soa_dir,\n )\n\n registrations = instance_config.get_registrations()\n # if the primary registration does not match the service_instance name then\n # the best we can do is check marathon for replication (for now).\n if proxy_port is not None and registrations[0] == instance_config.job_id:\n check_smartstack_replication_for_instance(\n instance_config=instance_config,\n expected_count=expected_count,\n smartstack_replication_checker=smartstack_replication_checker,\n )\n else:\n check_healthy_marathon_tasks_for_service_instance(\n instance_config=instance_config,\n expected_count=expected_count,\n all_tasks=all_tasks,\n )", "def health_ok(self):\n for client in self.clients():\n if client.run_cmd('ls'):\n log.info('Vmware cluster is up.')\n return True\n else:\n return False", "def _estimate_elasticsearch_requirement(\n instance: Instance,\n desires: CapacityDesires,\n working_set: float,\n reads_per_second: float,\n max_rps_to_disk: int,\n zones_per_region: int = 3,\n copies_per_region: int = 3,\n) -> CapacityRequirement:\n # Keep half of the cores free for background work (merging mostly)\n needed_cores = math.ceil(sqrt_staffed_cores(desires) * 1.5)\n # Keep half of the bandwidth available for backup\n needed_network_mbps = simple_network_mbps(desires) * 2\n\n needed_disk = math.ceil(\n (1.0 / desires.data_shape.estimated_compression_ratio.mid)\n * desires.data_shape.estimated_state_size_gib.mid\n * copies_per_region,\n )\n\n # Rough estimate of how many instances we would need just for the the CPU\n # Note that this is a lower bound, we might end up with more.\n needed_cores = math.ceil(\n max(1, needed_cores // (instance.cpu_ghz / 
desires.core_reference_ghz))\n )\n rough_count = math.ceil(needed_cores / instance.cpu)\n\n # Generally speaking we want fewer than some number of reads per second\n # hitting disk per instance. If we don't have many reads we don't need to\n # hold much data in memory.\n instance_rps = max(1, reads_per_second // rough_count)\n disk_rps = instance_rps * _es_io_per_read(max(1, needed_disk // rough_count))\n rps_working_set = min(1.0, disk_rps / max_rps_to_disk)\n\n # If disk RPS will be smaller than our target because there are no\n # reads, we don't need to hold as much data in memory\n needed_memory = min(working_set, rps_working_set) * needed_disk\n\n # Now convert to per zone\n needed_cores = needed_cores // zones_per_region\n needed_disk = needed_disk // zones_per_region\n needed_memory = int(needed_memory // zones_per_region)\n logger.debug(\n \"Need (cpu, mem, disk, working) = (%s, %s, %s, %f)\",\n needed_cores,\n needed_memory,\n needed_disk,\n working_set,\n )\n\n return CapacityRequirement(\n requirement_type=\"elasticsearch-data-zonal\",\n core_reference_ghz=desires.core_reference_ghz,\n cpu_cores=certain_int(needed_cores),\n mem_gib=certain_float(needed_memory),\n disk_gib=certain_float(needed_disk),\n network_mbps=certain_float(needed_network_mbps),\n context={\n \"working_set\": min(working_set, rps_working_set),\n \"rps_working_set\": rps_working_set,\n \"disk_slo_working_set\": working_set,\n \"replication_factor\": copies_per_region,\n \"compression_ratio\": round(\n 1.0 / desires.data_shape.estimated_compression_ratio.mid, 2\n ),\n \"read_per_second\": reads_per_second,\n },\n )", "def test_04_verify_upgraded_ipv6_network_redundant(self):\n\n self.createIpv4NetworkOffering(True)\n self.createIpv6NetworkOfferingForUpdate(True)\n self.createTinyServiceOffering()\n self.prepareRoutingTestResourcesInBackground()\n self.deployNetwork()\n self.deployNetworkVm()\n self.updateNetworkWithOffering()\n self.checkIpv6Network()\n self.checkIpv6NetworkVm()\n self.restartNetworkWithCleanup()\n self.checkIpv6Network()\n self.checkIpv6NetworkRouting()\n self.checkIpv6FirewallRule()\n self.checkNetworkVRRedundancy()", "def waitForInstanceToRun(instance):\n while True:\n try:\n instance.update()\n break\n except EC2ResponseError:\n continue\n\n for trial in range(0, NUM_RETRY_ATTEMPTS):\n if instance.update() == u'running':\n break\n elif trial == NUM_RETRY_ATTEMPTS-1:\n raise RuntimeError(\"AWS instance failed to startup after %d \" \\\n \"re-checks\" % NUM_RETRY_ATTEMPTS)\n else:\n time.sleep(1)", "def test_upgrade_plan_all_fine(setup, skuba):\n\n setup_kubernetes_version(skuba)\n out = skuba.cluster_upgrade_plan()\n\n assert out.find(\n \"Congratulations! 
You are already at the latest version available\"\n ) != -1", "def __run_instances(self, number=1, policies={}):\n try:\n self.euca = Euca2ool('k:n:t:g:d:f:z:',\n ['key=', 'kernel=', 'ramdisk=', 'instance-count=', 'instance-type=',\n 'group=', 'user-data=', 'user-data-file=', 'addressing=', 'availability-zone='])\n except Exception, ex:\n sys.exit(1)\n\n instance_type = policies.get('instance_type') or 'm1.small'\n image_id = policies.get('image_id') or self.__get_image_id()[0]\n min_count = number\n max_count = number\n keyname = 'mykey'\n \n kernel_id = None\n ramdisk_id = None\n group_names = []\n user_data = None\n addressing_type = None\n zone = None\n user_data = None\n \n if image_id:\n euca_conn = self.__make_connection()\n try:\n reservation = euca_conn.run_instances(image_id = image_id,\n min_count = min_count,\n max_count = max_count,\n key_name = keyname,\n security_groups = group_names,\n user_data = user_data,\n addressing_type = addressing_type,\n instance_type = instance_type,\n placement = zone,\n kernel_id = kernel_id,\n ramdisk_id = ramdisk_id)\n except Exception, ex:\n self.euca.display_error_and_exit('error:%s' % ex)\n return reservation\n return False", "def test_least_busy_host_gets_instance(self):\n s_ref = self._create_compute_service(host='host1')\n s_ref2 = self._create_compute_service(host='host2')\n instance_id1 = self._create_instance(host='host1')\n\n instance_id2 = self._create_instance()\n host = self.scheduler.driver.schedule_run_instance(self.context,\n instance_id2)\n self.assertEqual(host, 'host2')\n db.instance_destroy(self.context, instance_id2)\n db.instance_destroy(self.context, instance_id1)\n db.service_destroy(self.context, s_ref['id'])\n db.service_destroy(self.context, s_ref2['id'])", "def run_instance():\n ami_id = \"ami-04876f29fd3a5e8ba\" # AMI Id\n instance_type = \"t2.micro\" # Instance Type\n tag_specs = [\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': 'Name',\n 'Value': 'BoB10@ProductDev.sdk.8084'\n }\n ]\n }\n ]\n\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Run an instance\n\n instances = ec2_resource.create_instances(ImageId=ami_id, InstanceType=instance_type,\n MaxCount=1, MinCount=1, KeyName='BoB10@ProductDev.8084',\n TagSpecifications=tag_specs, \n SecurityGroupIds=['sg-06b757b4bb272d98f'],\n UserData=assemble_userdata().as_string())\n Instance_id = instances[0].id\n print('\\nInstance Id: ' + Instance_id)\n print('Image Id: ' + instances[0].image_id)\n print('Instance Type: ' + instances[0].instance_type)\n print('State: ' + instances[0].state['Name'])\n return Instance_id", "def test_instance_running(self) -> None:\n if self.prod_env:\n ec2_name = 'saints-xctf-server-prod-asg'\n else:\n ec2_name = 'saints-xctf-server-dev-asg'\n\n instances = self.get_ec2(ec2_name)\n self.assertTrue(len(instances) > 0)", "def tcp_ping_nodes(self, timeout=20.0):\n for node in self.all_instances:\n if node.instance_type in [\n InstanceType.RESILIENT_SINGLE,\n InstanceType.SINGLE,\n InstanceType.DBSERVER,\n ]:\n node.check_version_request(timeout)", "def step_impl(context, instance_number):\n num_try = 60\n interval = 10\n success = False\n\n for i in range(num_try):\n time.sleep(interval)\n context.service_instances = context.service.status()['replicaStatus']\n if len(context.service_instances) == int(instance_number):\n success = True\n break\n context.dl.logger.debug(\"Step is running for {:.2f}[s] and now Going to sleep {:.2f}[s]\".format((i + 1) * interval,\n 
interval))\n\n assert success, \"TEST FAILED: Expected {}, Got {}\".format(instance_number, len(context.service_instances))", "def check_vms(st):\n\n logging.info(\"Checking batch system's VMs...\")\n check_time = time.time()\n\n # Retrieve *all* running instances (also the non-owned ones) and filter out\n # statuses of workers which are not valid VMs: we are not interested in them\n rvms = running_instances()\n rvms2 = []\n\n rips = []\n if rvms is not None:\n for inst in rvms:\n ipv4 = inst.network_ip(network_name=cf[\"api\"][\"network_name\"])\n if ipv4 is not None:\n rips.append(ipv4)\n rvms2.append(inst)\n if len(rips) == 0:\n rips = None\n new_workers_status = BatchPlugin.poll_status( st['workers_status'], rips )\n\n rvms=rvms2\n\n if new_workers_status is not None:\n #logging.debug(new_workers_status)\n st['workers_status'] = new_workers_status\n new_workers_status = None\n\n hosts_shutdown = []\n for host,info in st['workers_status'].iteritems():\n if info['jobs'] != 0: continue\n if (check_time-info['unchangedsince']) > cf['elastiq']['idle_for_time_s']:\n logging.info(\"Host %s is idle for more than %ds: requesting shutdown\" % \\\n (host,cf['elastiq']['idle_for_time_s']))\n st['workers_status'][host]['unchangedsince'] = check_time # reset timer\n hosts_shutdown.append(host)\n\n if len(hosts_shutdown) > 0:\n inst_ok = scale_down(hosts_shutdown, valid_hostnames=st['workers_status'].keys())\n change_vms_allegedly_running(st, -len(inst_ok))\n\n # Scale up to reach the minimum quota, if any\n min_vms = cf['quota']['min_vms']\n if min_vms >= 1:\n rvms = running_instances(st['workers_status'].keys())\n if rvms is None:\n logging.warning(\"Cannot get list of running instances for honoring min quota of %d\" % min_vms)\n else:\n n_run = len(rvms)\n n_consider_run = n_run + st['vms_allegedly_running']\n logging.info(\"VMs: running=%d | allegedly running=%d | considering=%d\" % \\\n (n_run, st['vms_allegedly_running'], n_consider_run))\n n_vms = min_vms-n_consider_run\n if n_vms > 0:\n logging.info(\"Below minimum quota (%d VMs): requesting %d more VMs\" % \\\n (min_vms,n_vms))\n inst_ok = scale_up(n_vms, valid_hostnames=st['workers_status'].keys(), vms_allegedly_running=st['vms_allegedly_running'] )\n for inst in inst_ok:\n change_vms_allegedly_running(st, 1, inst)\n st['event_queue'].append({\n 'action': 'check_owned_instance',\n 'when': time.time() + cf['elastiq']['estimated_vm_deploy_time_s'],\n 'params': [ inst ]\n })\n\n # OK: schedule when configured\n sched_when = time.time() + cf['elastiq']['check_vms_every_s']\n\n else:\n # Not OK: reschedule ASAP\n sched_when = 0\n\n return {\n 'action': 'check_vms',\n 'when': sched_when\n }", "def check_yarn_service(master, ec2_opts, num_nodes):\n output = spark_ec2.ssh_read(master, ec2_opts, \"/root/ephemeral-hdfs/bin/yarn node -list -all |grep RUNNING |wc -l\")\n # Ok if one slave is down\n return int(output) >= int(num_nodes) - 1", "def test_update_hyperflex_cluster(self):\n pass", "def create_instance(name, config, region, secrets, key_name, instance_data,\n deploypass):\n conn = connect_to_region(\n region,\n aws_access_key_id=secrets['aws_access_key_id'],\n aws_secret_access_key=secrets['aws_secret_access_key']\n )\n vpc = VPCConnection(\n region=conn.region,\n aws_access_key_id=secrets['aws_access_key_id'],\n aws_secret_access_key=secrets['aws_secret_access_key'])\n\n # Make sure we don't request the same things twice\n token = str(uuid.uuid4())[:16]\n\n instance_data = instance_data.copy()\n instance_data['name'] = name\n 
instance_data['hostname'] = '{name}.{domain}'.format(\n name=name, domain=config['domain'])\n\n bdm = None\n if 'device_map' in config:\n bdm = BlockDeviceMapping()\n for device, device_info in config['device_map'].items():\n bdm[device] = BlockDeviceType(size=device_info['size'],\n delete_on_termination=True)\n\n ip_address = get_ip(instance_data['hostname'])\n subnet_id = None\n\n if ip_address:\n s_id = get_subnet_id(vpc, ip_address)\n if s_id in config['subnet_ids']:\n if ip_available(conn, ip_address):\n subnet_id = s_id\n else:\n log.warning(\"%s already assigned\" % ip_address)\n\n # TODO: fail if no IP assigned\n if not ip_address or not subnet_id:\n ip_address = None\n subnet_id = choice(config.get('subnet_ids'))\n\n while True:\n try:\n reservation = conn.run_instances(\n image_id=config['ami'],\n key_name=key_name,\n instance_type=config['instance_type'],\n block_device_map=bdm,\n client_token=token,\n subnet_id=subnet_id,\n private_ip_address=ip_address,\n disable_api_termination=bool(config.get('disable_api_termination')),\n security_group_ids=config.get('security_group_ids', []),\n )\n break\n except boto.exception.BotoServerError:\n log.exception(\"Cannot start an instance\")\n time.sleep(10)\n\n instance = reservation.instances[0]\n log.info(\"instance %s created, waiting to come up\", instance)\n # Wait for the instance to come up\n while True:\n try:\n instance.update()\n if instance.state == 'running':\n break\n except Exception:\n log.exception(\"hit error waiting for instance to come up\")\n time.sleep(10)\n\n instance.add_tag('Name', name)\n instance.add_tag('FQDN', instance_data['hostname'])\n instance.add_tag('created', time.strftime(\"%Y-%m-%d %H:%M:%S %Z\",\n time.gmtime()))\n instance.add_tag('moz-type', config['type'])\n\n log.info(\"assimilating %s\", instance)\n instance.add_tag('moz-state', 'pending')\n while True:\n try:\n assimilate(instance.private_ip_address, config, instance_data, deploypass)\n break\n except:\n log.exception(\"problem assimilating %s\", instance)\n time.sleep(10)\n instance.add_tag('moz-state', 'ready')", "def update_pvserver_instances(instances):\n status = {True: \"available\", False: \"in-use\"}\n for k, v in instances.items():\n is_available = is_pvserver_available(v[\"name\"], v[\"port\"])\n v[\"status\"] = status[is_available]", "def update_available_resource(self, context):\n # ask hypervisor for its view of resource availability &\n # usage:\n resources = self.driver.get_available_resource()\n if not resources:\n # The virt driver does not support this function\n LOG.warn(_(\"Virt driver does not support \"\n \"'get_available_resource' Compute tracking is disabled.\"))\n self.compute_node = None\n self.claims = {}\n return\n\n # Confirm resources dictionary contains expected keys:\n self._verify_resources(resources)\n\n resources['free_ram_mb'] = resources['memory_mb'] - \\\n resources['memory_mb_used']\n resources['free_disk_gb'] = resources['local_gb'] - \\\n resources['local_gb_used']\n\n LOG.audit(_(\"free_ram_mb: %s\") % resources['free_ram_mb'])\n LOG.audit(_(\"free_disk_gb: %s\") % resources['free_disk_gb'])\n # Apply resource claims representing in-progress operations to\n # 'resources'. 
This may over-estimate the amount of resources in use,\n # at least until the next time 'update_available_resource' runs.\n self._apply_claims(resources)\n\n # also generate all load stats:\n values = self._create_load_stats(context)\n resources.update(values)\n\n if not self.compute_node:\n # we need a copy of the ComputeNode record:\n service = self._get_service(context)\n if not service:\n # no service record, disable resource\n return\n\n compute_node_ref = service['compute_node']\n if compute_node_ref:\n self.compute_node = compute_node_ref[0]\n\n if not self.compute_node:\n # Need to create the ComputeNode record:\n resources['service_id'] = service['id']\n self.compute_node = self._create(context, resources)\n LOG.info(_('Compute_service record created for %s ') % self.host)\n\n else:\n # just update the record:\n self.compute_node = self._update(context, resources,\n prune_stats=True)\n LOG.info(_('Compute_service record updated for %s ') % self.host)", "def instance_outdated_age(instance_id, days_fresh):\n\n response = ec2_client.describe_instances(\n InstanceIds=[\n instance_id,\n ]\n )\n\n instance_launch_time = response['Reservations'][0]['Instances'][0]['LaunchTime']\n\n # gets the age of a node by days only:\n instance_age = ((datetime.datetime.now(instance_launch_time.tzinfo) - instance_launch_time).days)\n\n # gets the remaining age of a node in seconds (e.g. if node is y days and x seconds old this will only retrieve the x seconds):\n instance_age_remainder = ((datetime.datetime.now(instance_launch_time.tzinfo) - instance_launch_time).seconds)\n\n if instance_age > days_fresh:\n logger.info(\"Instance id {} launch age of '{}' day(s) is older than expected '{}' day(s)\".format(instance_id, instance_age, days_fresh))\n return True\n elif (instance_age == days_fresh) and (instance_age_remainder > 0):\n logger.info(\"Instance id {} is older than expected '{}' day(s) by {} seconds.\".format(instance_id, days_fresh, instance_age_remainder))\n return True\n else:\n logger.info(\"Instance id {} : OK \".format(instance_id))\n return False", "def check_exactly_one_current_version(self):\n expected_state = \"CURRENT\"\n\n query = \"SELECT COUNT(*) FROM cluster_version;\"\n self.cursor.execute(query)\n result = self.cursor.fetchone()\n if result is None or len(result) != 1:\n Logger.error(\"Unable to run query: {0}\".format(query))\n return\n\n count = result[0]\n if count == 0:\n msg = \"There are no cluster_versions. 
Start ambari-server, and then perform a Restart on one of the services.\\n\" + \\\n \"Then navigate to the \\\"Stacks and Versions > Versions\\\" page and ensure you can see the stack version.\\n\" + \\\n \"Next, restart all services, one-by-one, so that Ambari knows what version each component is running.\"\n Logger.warning(msg)\n elif count == 1:\n query = \"SELECT rv.repo_version_id, rv.version, cv.state FROM cluster_version cv JOIN repo_version rv ON cv.repo_version_id = rv.repo_version_id;\"\n self.cursor.execute(query)\n result = self.cursor.fetchone()\n\n repo_version_id = None\n repo_version = None\n cluster_version_state = None\n\n if result and len(result) == 3:\n repo_version_id = result[0]\n repo_version = result[1]\n cluster_version_state = result[2]\n\n if repo_version_id and repo_version and cluster_version_state:\n if cluster_version_state.upper() == expected_state:\n self.check_all_hosts(repo_version_id, repo_version)\n Logger.info(\"******************************************************************************************************************************************************\")\n Logger.info(\"\\t\\t\\t\\t\\t\\t\\tHDP STACK OVERVIEW\")\n\t Logger.info(\"******************************************************************************************************************************************************\")\n print (\"\\n\")\n Logger.info(\"Cluster HDP Version\\t{0}\".format(repo_version))\n Logger.info(\"Cluster State\\t{0}\".format(cluster_version_state))\n Logger.info(\"Ambari version\\t:{0}\".format(self.ambari_version))\n\n if self.ambari_server_user != \"root\" :\n Logger.info(\"Ambari Server as non-root?\\tYes\")\n else :\n Logger.info(\"Ambari Server as non-root?\\tNo\")\n\n # Read ambari-agent.ini file\n if os.path.exists(AMBARI_AGENT_INI):\n self.ambari_agent_props = self.read_conf_file(AMBARI_AGENT_INI)\n Logger.debug(\"Reading file {0}.\".format(self.ambari_agent_props))\n if \"run_as_user\" in self.ambari_agent_props:\n self.run_as_user = self.ambari_agent_props[\"run_as_user\"]\n if self.run_as_user != \"root\":\n Logger.info(\"Ambari Agent as non-root?\\tYes\")\n else:\n Logger.info(\"Ambari Agent as non-root?\\tNo\")\n else:\n Logger.error(\"Unable to read ambari-agent.ini file\")\n\n else:\n Logger.error(\"Cluster Version {0} should have a state of {1} but is {2}. Make sure to restart all of the Services.\".format(repo_version, expected_state, cluster_version_state))\n else:\n Logger.error(\"Unable to run query: {0}\".format(query))\n elif count > 1:\n # Ensure at least one Cluster Version is CURRENT\n Logger.info(\"Found multiple Cluster versions, checking that exactly one is {0}.\".format(expected_state))\n query = \"SELECT rv.repo_version_id, rv.version, cv.state FROM cluster_version cv JOIN repo_version rv ON cv.repo_version_id = rv.repo_version_id WHERE cv.state = '{0}';\".format(expected_state)\n self.cursor.execute(query)\n rows = self.cursor.fetchall()\n if rows:\n if len(rows) == 1:\n Logger.info(\"Good news; Cluster Version {0} has a state of {1}.\".format(rows[0][1], expected_state))\n self.check_all_hosts_current(rows[0][0], rows[0][1])\n elif len(rows) > 1:\n # Take the repo_version's version column\n repo_versions = [row[1] for row in rows if len(row) == 3]\n Logger.error(\"Found multiple cluster versions with a state of {0}, but only one should be {0}.\\n\" \\\n \"Will need to fix this manually, please contact Support. 
Cluster Versions found: {1}\".format(expected_state, \", \".join(repo_versions)))\n else:\n Logger.error(\"Unable to run query: {0}\\n\".format(query))\n pass", "def test_rebuilt_server_vcpus(self):\n\n remote_client = self.server_behaviors.get_remote_instance_client(\n self.server, self.servers_config)\n server_actual_vcpus = remote_client.get_number_of_cpus()\n self.assertEqual(server_actual_vcpus, self.expected_vcpus)", "def provision(args):\n cfg_file = os.path.join(xbow.XBOW_CONFIGDIR, \"settings.yml\")\n\n with open(cfg_file, 'r') as ymlfile:\n cfg = yaml.safe_load(ymlfile)\n\n scheduler = get_by_name(cfg['scheduler_name'])\n if len(scheduler) == 0:\n raise ValueError('Error - cannot find the scheduler')\n elif len(scheduler) > 1:\n raise ValueError('Error - more than one scheduler found')\n workers = get_by_name(cfg['worker_pool_name'])\n if len(workers) == 0:\n print('Warning: no workers found')\n all_nodes = scheduler + workers\n all_cis = [ConnectedInstance(i) for i in all_nodes]\n with open(args.script, 'r') as f:\n for line in f:\n if len(line) > 0 and line[0] == '#':\n print(line[:-1])\n elif len(line) > 0 :\n command = line[:-1]\n if command.split()[0] != 'sudo':\n command = 'sudo ' + command\n print(command + ' : ', end='', flush=True)\n result = exec_all(all_cis, command)\n status = np.all(np.array(result) == 0)\n if status:\n print('OK')\n else:\n print('FAILED')\n for i in range(len(result)):\n if result[i] != 0:\n if i == 0:\n print('Error on scheduler:')\n else:\n print('Error on worker {}'.format(i-1))\n print(all_cis[i].output)\n break\n else:\n status = False\n print(line[:-1], ' : ERROR')\n break\n\n return status", "def checkDBImportInstance(self, instance):\n\n\t\tsession = self.configDBSession()\n\t\tdbimportInstances = aliased(configSchema.dbimportInstances)\n\n\t\tresult = (session.query(\n\t\t\t\tdbimportInstances.name\n\t\t\t)\n\t\t\t.select_from(dbimportInstances)\n\t\t\t.filter(dbimportInstances.name == instance)\n\t\t\t.count())\n\n\t\tif result == 0:\n\t\t\tlogging.error(\"No DBImport Instance with that name can be found in table 'dbimport_instances'\")\n\t\t\tself.remove_temporary_files()\n\t\t\tsys.exit(1)", "def check_fixed(self: AutoScaler) -> AutoScalerState:\n launched_size = len(self.clients)\n registered_size = Client.count_connected()\n task_count = Task.count_remaining()\n log.debug(f'Autoscale check (clients: {registered_size}/{launched_size}, tasks: {task_count})')\n if launched_size < self.min_size:\n log.debug(f'Autoscale min-size reached ({launched_size} < {self.min_size})')\n return AutoScalerState.SCALE\n if launched_size == 0 and task_count == 0:\n return AutoScalerState.WAIT\n if launched_size == 0 and task_count > 0:\n log.debug(f'Autoscale adding client ({task_count} tasks remaining)')\n return AutoScalerState.SCALE\n else:\n return AutoScalerState.WAIT", "def run_instances(self, image_id, min_count=1, max_count=1,\r\n key_name=None, security_groups=None,\r\n user_data=None, addressing_type=None,\r\n instance_type='m1.small', placement=None,\r\n kernel_id=None, ramdisk_id=None,\r\n monitoring_enabled=False, subnet_id=None,\r\n block_device_map=None,\r\n disable_api_termination=False,\r\n instance_initiated_shutdown_behavior=None,\r\n private_ip_address=None,\r\n placement_group=None, client_token=None,\r\n security_group_ids=None):\r\n params = {'ImageId':image_id,\r\n 'MinCount':min_count,\r\n 'MaxCount': max_count}\r\n if key_name:\r\n params['KeyName'] = key_name\r\n if security_group_ids:\r\n l = []\r\n for group in 
security_group_ids:\r\n if isinstance(group, SecurityGroup):\r\n l.append(group.name)\r\n else:\r\n l.append(group)\r\n self.build_list_params(params, l, 'SecurityGroupId')\r\n if security_groups:\r\n l = []\r\n for group in security_groups:\r\n if isinstance(group, SecurityGroup):\r\n l.append(group.name)\r\n else:\r\n l.append(group)\r\n self.build_list_params(params, l, 'SecurityGroup')\r\n if user_data:\r\n params['UserData'] = base64.b64encode(user_data)\r\n if addressing_type:\r\n params['AddressingType'] = addressing_type\r\n if instance_type:\r\n params['InstanceType'] = instance_type\r\n if placement:\r\n params['Placement.AvailabilityZone'] = placement\r\n if placement_group:\r\n params['Placement.GroupName'] = placement_group\r\n if kernel_id:\r\n params['KernelId'] = kernel_id\r\n if ramdisk_id:\r\n params['RamdiskId'] = ramdisk_id\r\n if monitoring_enabled:\r\n params['Monitoring.Enabled'] = 'true'\r\n if subnet_id:\r\n params['SubnetId'] = subnet_id\r\n if private_ip_address:\r\n params['PrivateIpAddress'] = private_ip_address\r\n if block_device_map:\r\n block_device_map.build_list_params(params)\r\n if disable_api_termination:\r\n params['DisableApiTermination'] = 'true'\r\n if instance_initiated_shutdown_behavior:\r\n val = instance_initiated_shutdown_behavior\r\n params['InstanceInitiatedShutdownBehavior'] = val\r\n if client_token:\r\n params['ClientToken'] = client_token\r\n return self.get_object('RunInstances', params, Reservation, verb='POST')", "def __init__(__self__, *,\n backend_type: Optional[pulumi.Input['InstanceBackendType']] = None,\n connection_name: Optional[pulumi.Input[str]] = None,\n current_disk_size: Optional[pulumi.Input[str]] = None,\n database_version: Optional[pulumi.Input['InstanceDatabaseVersion']] = None,\n disk_encryption_configuration: Optional[pulumi.Input['DiskEncryptionConfigurationArgs']] = None,\n disk_encryption_status: Optional[pulumi.Input['DiskEncryptionStatusArgs']] = None,\n etag: Optional[pulumi.Input[str]] = None,\n failover_replica: Optional[pulumi.Input['InstanceFailoverReplicaArgs']] = None,\n gce_zone: Optional[pulumi.Input[str]] = None,\n instance_type: Optional[pulumi.Input['InstanceInstanceType']] = None,\n ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input['IpMappingArgs']]]] = None,\n ipv6_address: Optional[pulumi.Input[str]] = None,\n kind: Optional[pulumi.Input[str]] = None,\n maintenance_version: Optional[pulumi.Input[str]] = None,\n master_instance_name: Optional[pulumi.Input[str]] = None,\n max_disk_size: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n on_premises_configuration: Optional[pulumi.Input['OnPremisesConfigurationArgs']] = None,\n out_of_disk_report: Optional[pulumi.Input['SqlOutOfDiskReportArgs']] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n replica_configuration: Optional[pulumi.Input['ReplicaConfigurationArgs']] = None,\n replica_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n root_password: Optional[pulumi.Input[str]] = None,\n satisfies_pzs: Optional[pulumi.Input[bool]] = None,\n scheduled_maintenance: Optional[pulumi.Input['SqlScheduledMaintenanceArgs']] = None,\n secondary_gce_zone: Optional[pulumi.Input[str]] = None,\n self_link: Optional[pulumi.Input[str]] = None,\n server_ca_cert: Optional[pulumi.Input['SslCertArgs']] = None,\n service_account_email_address: Optional[pulumi.Input[str]] = None,\n settings: Optional[pulumi.Input['SettingsArgs']] = None,\n state: 
Optional[pulumi.Input['InstanceState']] = None,\n suspension_reason: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceSuspensionReasonItem']]]] = None):\n if backend_type is not None:\n pulumi.set(__self__, \"backend_type\", backend_type)\n if connection_name is not None:\n pulumi.set(__self__, \"connection_name\", connection_name)\n if current_disk_size is not None:\n pulumi.set(__self__, \"current_disk_size\", current_disk_size)\n if database_version is not None:\n pulumi.set(__self__, \"database_version\", database_version)\n if disk_encryption_configuration is not None:\n pulumi.set(__self__, \"disk_encryption_configuration\", disk_encryption_configuration)\n if disk_encryption_status is not None:\n pulumi.set(__self__, \"disk_encryption_status\", disk_encryption_status)\n if etag is not None:\n warnings.warn(\"\"\"This field is deprecated and will be removed from a future version of the API. Use the `settings.settingsVersion` field instead.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"etag is deprecated: This field is deprecated and will be removed from a future version of the API. Use the `settings.settingsVersion` field instead.\"\"\")\n if etag is not None:\n pulumi.set(__self__, \"etag\", etag)\n if failover_replica is not None:\n pulumi.set(__self__, \"failover_replica\", failover_replica)\n if gce_zone is not None:\n pulumi.set(__self__, \"gce_zone\", gce_zone)\n if instance_type is not None:\n pulumi.set(__self__, \"instance_type\", instance_type)\n if ip_addresses is not None:\n pulumi.set(__self__, \"ip_addresses\", ip_addresses)\n if ipv6_address is not None:\n warnings.warn(\"\"\"The IPv6 address assigned to the instance. (Deprecated) This property was applicable only to First Generation instances.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"ipv6_address is deprecated: The IPv6 address assigned to the instance. 
(Deprecated) This property was applicable only to First Generation instances.\"\"\")\n if ipv6_address is not None:\n pulumi.set(__self__, \"ipv6_address\", ipv6_address)\n if kind is not None:\n pulumi.set(__self__, \"kind\", kind)\n if maintenance_version is not None:\n pulumi.set(__self__, \"maintenance_version\", maintenance_version)\n if master_instance_name is not None:\n pulumi.set(__self__, \"master_instance_name\", master_instance_name)\n if max_disk_size is not None:\n pulumi.set(__self__, \"max_disk_size\", max_disk_size)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if on_premises_configuration is not None:\n pulumi.set(__self__, \"on_premises_configuration\", on_premises_configuration)\n if out_of_disk_report is not None:\n pulumi.set(__self__, \"out_of_disk_report\", out_of_disk_report)\n if project is not None:\n pulumi.set(__self__, \"project\", project)\n if region is not None:\n pulumi.set(__self__, \"region\", region)\n if replica_configuration is not None:\n pulumi.set(__self__, \"replica_configuration\", replica_configuration)\n if replica_names is not None:\n pulumi.set(__self__, \"replica_names\", replica_names)\n if root_password is not None:\n pulumi.set(__self__, \"root_password\", root_password)\n if satisfies_pzs is not None:\n pulumi.set(__self__, \"satisfies_pzs\", satisfies_pzs)\n if scheduled_maintenance is not None:\n pulumi.set(__self__, \"scheduled_maintenance\", scheduled_maintenance)\n if secondary_gce_zone is not None:\n pulumi.set(__self__, \"secondary_gce_zone\", secondary_gce_zone)\n if self_link is not None:\n pulumi.set(__self__, \"self_link\", self_link)\n if server_ca_cert is not None:\n pulumi.set(__self__, \"server_ca_cert\", server_ca_cert)\n if service_account_email_address is not None:\n pulumi.set(__self__, \"service_account_email_address\", service_account_email_address)\n if settings is not None:\n pulumi.set(__self__, \"settings\", settings)\n if state is not None:\n pulumi.set(__self__, \"state\", state)\n if suspension_reason is not None:\n pulumi.set(__self__, \"suspension_reason\", suspension_reason)", "def do_create(self):\n cluster_id = self.entity.cluster_id\n if cluster_id and self.cause == consts.CAUSE_RPC:\n # Check cluster size constraint if target cluster is specified\n cluster = cm.Cluster.load(self.context, cluster_id)\n desired = no.Node.count_by_cluster(self.context, cluster_id)\n result = su.check_size_params(cluster, desired, None, None, True)\n if result:\n # cannot place node into the cluster\n no.Node.update(self.context, self.entity.id,\n {'cluster_id': '', 'status': consts.NS_ERROR})\n return self.RES_ERROR, result\n\n res, reason = self.entity.do_create(self.context)\n\n if cluster_id and self.cause == consts.CAUSE_RPC:\n # Update cluster's desired_capacity and re-evaluate its status no\n # matter the creation is a success or not because the node object\n # is already treated as member of the cluster and the node\n # creation may have changed the cluster's status\n cluster.eval_status(self.context, consts.NODE_CREATE,\n desired_capacity=desired)\n if res:\n return self.RES_OK, 'Node created successfully.'\n else:\n return self.RES_ERROR, reason", "def upgrade():\n op.add_column(\n 'share_instances',\n Column('access_rules_status', String(length=255))\n )\n\n connection = op.get_bind()\n share_instances_table = utils.load_table('share_instances', connection)\n instance_access_table = utils.load_table('share_instance_access_map',\n connection)\n\n # NOTE(u_glide): Data migrations shouldn't be 
performed on live clouds\n # because it will lead to unpredictable behaviour of running operations\n # like migration.\n instances_query = (\n share_instances_table.select()\n .where(share_instances_table.c.status == constants.STATUS_AVAILABLE)\n .where(share_instances_table.c.deleted == 'False')\n )\n\n for instance in connection.execute(instances_query):\n\n access_mappings_query = instance_access_table.select().where(\n instance_access_table.c.share_instance_id == instance['id']\n ).where(instance_access_table.c.deleted == 'False')\n\n status = constants.STATUS_ACTIVE\n\n for access_rule in connection.execute(access_mappings_query):\n\n if (access_rule['state'] == constants.STATUS_DELETING or\n access_rule['state'] not in priorities):\n continue\n\n if priorities[access_rule['state']] > priorities[status]:\n status = access_rule['state']\n\n # pylint: disable=no-value-for-parameter\n op.execute(\n share_instances_table.update().where(\n share_instances_table.c.id == instance['id']\n ).values({'access_rules_status': upgrade_data_mapping[status]})\n )\n\n op.drop_column('share_instance_access_map', 'state')", "def test_live_migration_src_check_volume_node_not_alive(self):\n\n instance_id = self._create_instance()\n i_ref = db.instance_get(self.context, instance_id)\n dic = {'instance_id': instance_id, 'size': 1}\n v_ref = db.volume_create(self.context, {'instance_id': instance_id,\n 'size': 1})\n t1 = utils.utcnow() - datetime.timedelta(1)\n dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume',\n 'topic': 'volume', 'report_count': 0}\n s_ref = db.service_create(self.context, dic)\n\n self.assertRaises(exception.VolumeServiceUnavailable,\n self.scheduler.driver.schedule_live_migration,\n self.context, instance_id, i_ref['host'])\n\n db.instance_destroy(self.context, instance_id)\n db.service_destroy(self.context, s_ref['id'])\n db.volume_destroy(self.context, v_ref['id'])", "def check_runtime_vm(**kwargs):\n\n ti: TaskInstance = kwargs[\"ti\"]\n last_warning_time = ti.xcom_pull(\n key=TerraformTasks.XCOM_WARNING_TIME,\n task_ids=TerraformTasks.TASK_ID_VM_RUNTIME,\n dag_id=TerraformTasks.DAG_ID_DESTROY_VM,\n include_prior_dates=True,\n )\n start_time_vm = ti.xcom_pull(\n key=TerraformTasks.XCOM_START_TIME_VM,\n task_ids=TerraformTasks.TASK_ID_RUN,\n dag_id=TerraformTasks.DAG_ID_CREATE_VM,\n include_prior_dates=True,\n )\n\n if start_time_vm:\n # calculate number of hours passed since start time vm and now\n hours_on = (ti.start_date - start_time_vm).total_seconds() / 3600\n logging.info(\n f\"Start time VM: {start_time_vm}, hours passed since start time: {hours_on}, warning limit: \"\n f\"{TerraformTasks.VM_RUNTIME_H_WARNING}\"\n )\n\n # check if a warning has been sent previously and if so, how many hours ago\n if last_warning_time:\n hours_since_warning = (ti.start_date - last_warning_time).total_seconds() / 3600\n else:\n hours_since_warning = None\n\n # check if the VM has been on longer than the limit\n if hours_on > TerraformTasks.VM_RUNTIME_H_WARNING:\n # check if no warning was sent before or last time was longer ago than warning frequency\n if not hours_since_warning or hours_since_warning > TerraformTasks.WARNING_FREQUENCY_H:\n comments = (\n f\"Worker VM has been on since {start_time_vm}. No. 
hours passed since then: \"\n f\"{hours_on}.\"\n f\" Warning limit: {TerraformTasks.VM_RUNTIME_H_WARNING}H\"\n )\n project_id = Variable.get(AirflowVars.PROJECT_ID)\n slack_hook = create_slack_webhook(comments, project_id, **kwargs)\n\n # http_hook outputs the secret token, suppressing logging 'info' by setting level to 'warning'\n old_levels = change_task_log_level(logging.WARNING)\n slack_hook.execute()\n # change back to previous levels\n change_task_log_level(old_levels)\n\n ti.xcom_push(TerraformTasks.XCOM_WARNING_TIME, ti.start_date)\n else:\n logging.info(f\"Start time VM unknown.\")", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n auto_healing_policies: Optional[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerAutoHealingPoliciesArgs']]] = None,\n base_instance_name: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n distribution_policy_zones: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n named_ports: Optional[pulumi.Input[List[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerNamedPortArgs']]]]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n stateful_disks: Optional[pulumi.Input[List[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerStatefulDiskArgs']]]]] = None,\n target_pools: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,\n target_size: Optional[pulumi.Input[float]] = None,\n update_policy: Optional[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerUpdatePolicyArgs']]] = None,\n versions: Optional[pulumi.Input[List[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerVersionArgs']]]]] = None,\n wait_for_instances: Optional[pulumi.Input[bool]] = None,\n __props__=None,\n __name__=None,\n __opts__=None):\n if __name__ is not None:\n warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning)\n resource_name = __name__\n if __opts__ is not None:\n warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning)\n opts = __opts__\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n if opts.version is None:\n opts.version = _utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = dict()\n\n __props__['auto_healing_policies'] = auto_healing_policies\n if base_instance_name is None:\n raise TypeError(\"Missing required property 'base_instance_name'\")\n __props__['base_instance_name'] = base_instance_name\n __props__['description'] = description\n __props__['distribution_policy_zones'] = distribution_policy_zones\n __props__['name'] = name\n __props__['named_ports'] = named_ports\n __props__['project'] = project\n if region is None:\n raise TypeError(\"Missing required property 'region'\")\n __props__['region'] = region\n __props__['stateful_disks'] = stateful_disks\n __props__['target_pools'] = target_pools\n __props__['target_size'] = target_size\n __props__['update_policy'] = update_policy\n if versions is None:\n raise TypeError(\"Missing required property 'versions'\")\n __props__['versions'] = versions\n __props__['wait_for_instances'] = wait_for_instances\n __props__['fingerprint'] = None\n 
__props__['instance_group'] = None\n __props__['self_link'] = None\n super(RegionInstanceGroupManager, __self__).__init__(\n 'gcp:compute/regionInstanceGroupManager:RegionInstanceGroupManager',\n resource_name,\n __props__,\n opts)", "def get_running_instance_count(body):\n\tbody = dict(body)\n\tconn = autoscale.connect_to_region(region_name='us-west-2',\n\t\t\t\t\t\t\t\taws_access_key_id=body[\"aws_access_key_id\"],\n\t\t\t\t\t\t\t\taws_secret_access_key=body[\"aws_secret_access_key\"])\n\tinstance_status = conn.get_all_autoscaling_instances()\n\trunning_instances = 0\n\tfor item in instance_status:\n\t\tif (item.health_status == 'HEALTHY'):\n\t\t\trunning_instances += 1\n\treturn {\"data\":running_instances}", "def check_stability(self):", "def test_too_many_cores(self):\n compute1 = self.start_service('compute', host='host1')\n compute2 = self.start_service('compute', host='host2')\n instance_ids1 = []\n instance_ids2 = []\n for index in xrange(FLAGS.max_cores):\n instance_id = self._create_instance()\n compute1.run_instance(self.context, instance_id)\n instance_ids1.append(instance_id)\n instance_id = self._create_instance()\n compute2.run_instance(self.context, instance_id)\n instance_ids2.append(instance_id)\n instance_id = self._create_instance()\n self.assertRaises(driver.NoValidHost,\n self.scheduler.driver.schedule_run_instance,\n self.context,\n instance_id)\n db.instance_destroy(self.context, instance_id)\n for instance_id in instance_ids1:\n compute1.terminate_instance(self.context, instance_id)\n for instance_id in instance_ids2:\n compute2.terminate_instance(self.context, instance_id)\n compute1.kill()\n compute2.kill()", "def scale_up_application(asg_name):\n if_verbose(\"Scaling up %s in steps of %d\" % (asg_name, args.instance_count_step))\n current_capacity_count = args.instance_count_step\n while(True):\n check_error(scale_up_autoscaling_group(asg_name, current_capacity_count))\n check_error(check_autoscaling_group_health(asg_name, current_capacity_count))\n\n if args.elb_name:\n asg_instances = [{\"InstanceId\": a[\"InstanceId\"]} for a in asg.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name], MaxRecords=1)[\"AutoScalingGroups\"][0][\"Instances\"]]\n check_error(check_elb_instance_health(args.elb_name, asg_instances))\n\n if args.instance_count == current_capacity_count:\n break\n else:\n current_capacity_count += args.instance_count_step\n else:\n break\n\n if_verbose(\"Scaling up %s successful\" % asg_name)", "def test_delete_cluster_resource_quota(self):\n pass", "def test_assign_configuration_to_valid_instance(self):\n print(\"instance_info.id: %s\" % instance_info.id)\n print(\"configuration_info: %s\" % configuration_info)\n print(\"configuration_info.id: %s\" % configuration_info.id)\n config_id = configuration_info.id\n instance_info.dbaas.instances.modify(instance_info.id,\n configuration=config_id)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)", "def main():\n rs = redshift(config_file=CONFIG_FILENAME)\n \n # check if cluster already available\n try:\n clust_avail = check_available(rs)\n except rs_client.exceptions.ClusterNotFoundFault:\n clust_avail = False\n\n # if cluster not available, create it\n if not clust_avail:\n create_cluster(rs) \n \n print(f'Cluster is available. 
Cluster information: \\n{rs.get_cluster_info()}')", "def run(self):\n client = self._get_client()\n metadata = self._metadata\n\n if str(metadata.get('cluster')) == str(self._cluster_id):\n if str(self._uuid) == str(self._node_id):\n #hypervisor_hostname = client.servers.find(\n # id=self._uuid).to_dict()['OS-EXT-SRV-ATTR:hypervisor_hostname']\n hypervisor_hostname = client.servers.find(\n id=self._uuid).to_dict()['OS-EXT-SRV-ATTR:host']\n data = {\n 'migrate': True,\n 'uuid': self._uuid,\n 'hypervisor_hostname': hypervisor_hostname,\n 'flavor_id': self._flavor\n }\n return Result(data)\n\n data = {\n 'migrate': False,\n 'uuid': self._uuid,\n 'hypervisor_hostname': '',\n 'flavor_id':self._flavor\n }\n return Result(data)", "def launch_instance_vpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n subnet_id,\n security_group_id,\n machine_type = 'm3.medium',\n user_data = None,\n wait_for_running = True,\n public_ip = False,\n static_ip_address = None,\n monitor_params = None ) :\n interfaces = None\n subnet = None\n security_group_ids = None\n \n if static_ip_address is None:\n spec = boto.ec2.networkinterface.NetworkInterfaceSpecification( subnet_id = subnet_id,\n groups = [ security_group_id ],\n associate_public_ip_address = public_ip )\n interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection( spec )\n else:\n subnet = subnet_id\n security_group_ids = [security_group_id]\n\n instance_r = ec2_conn.run_instances( image_id = ami.id,\n key_name = keypair,\n instance_type = machine_type,\n monitoring_enabled = True,\n network_interfaces = interfaces,\n subnet_id = subnet, \n user_data = user_data,\n security_group_ids = security_group_ids,\n private_ip_address = static_ip_address )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n \n print \"Waiting for instance to be ready\"\n \n if wait_for_running :\n running = wait_on_object_state( instance, 'running', max_wait = 600, failure_state = 'terminated' )\n if not running :\n print \"Deployment instance still not up after long period of time! 
Exiting...\"\n sys.exit( 3 )\n\n if monitor_params :\n print \"Adding monitoring to the instance.\"\n\n return instance", "def update(self, validate=False):\r\n rs = self.connection.get_all_dbinstances(self.id)\r\n if len(rs) > 0:\r\n for i in rs:\r\n if i.id == self.id:\r\n self.__dict__.update(i.__dict__)\r\n elif validate:\r\n raise ValueError('%s is not a valid Instance ID' % self.id)\r\n return self.status", "def validate_cluster(self, resources, instances):\n instances_names = list(instances.values())\n assert ocp.wait_for_nodes_ready(instances_names), (\n \"Not all nodes reached status Ready\"\n )\n\n ceph_cluster = CephCluster()\n assert ceph_health_check(\n namespace=config.ENV_DATA['cluster_namespace']\n )\n ceph_cluster.cluster_health_check(timeout=60)\n\n # Create resources and run IO for both FS and RBD\n # Unpack resources\n projects, secrets, pools, storageclasses, pvcs, pods = resources[:6]\n\n # Project\n projects.append(helpers.create_project())\n\n # Secrets\n secrets.append(helpers.create_secret(constants.CEPHBLOCKPOOL))\n secrets.append(helpers.create_secret(constants.CEPHFILESYSTEM))\n\n # Pools\n pools.append(helpers.create_ceph_block_pool())\n pools.append(helpers.get_cephfs_data_pool_name())\n\n # Storageclasses\n storageclasses.append(\n helpers.create_storage_class(\n interface_type=constants.CEPHBLOCKPOOL,\n interface_name=pools[0].name,\n secret_name=secrets[0].name\n )\n )\n storageclasses.append(\n helpers.create_storage_class(\n interface_type=constants.CEPHFILESYSTEM,\n interface_name=pools[1],\n secret_name=secrets[1].name\n )\n )\n\n # PVCs\n pvcs.append(helpers.create_pvc(\n sc_name=storageclasses[0].name, namespace=projects[0].namespace)\n )\n pvcs.append(helpers.create_pvc(\n sc_name=storageclasses[1].name, namespace=projects[0].namespace)\n )\n\n # Pods\n pods.append(\n helpers.create_pod(\n interface_type=constants.CEPHBLOCKPOOL, pvc_name=pvcs[0].name,\n namespace=projects[0].namespace\n )\n )\n pods.append(\n helpers.create_pod(\n interface_type=constants.CEPHFILESYSTEM, pvc_name=pvcs[1].name,\n namespace=projects[0].namespace\n )\n )\n\n # Run IO\n for pod in pods:\n pod.run_io('fs', '1G')\n for pod in pods:\n fio_result = pod.get_fio_results()\n logger.info(f\"IOPs after FIO for pod {pod.name}:\")\n logger.info(\n f\"Read: {fio_result.get('jobs')[0].get('read').get('iops')}\"\n )\n logger.info(\n f\"Write: {fio_result.get('jobs')[0].get('write').get('iops')}\"\n )" ]
[ "0.61971885", "0.59949195", "0.5993477", "0.59486884", "0.5910418", "0.5794631", "0.5754411", "0.57093704", "0.56933165", "0.5670425", "0.5641884", "0.56177545", "0.5594963", "0.5538643", "0.5519282", "0.54935545", "0.5492039", "0.5479421", "0.5477119", "0.5459619", "0.54518646", "0.5447389", "0.5404465", "0.53799087", "0.53759754", "0.5370237", "0.5363215", "0.536025", "0.53299624", "0.5326488", "0.52986944", "0.5287418", "0.5285254", "0.5275934", "0.52728933", "0.5259769", "0.52591485", "0.52578896", "0.5246541", "0.521914", "0.5205483", "0.5204072", "0.520206", "0.519997", "0.51962024", "0.519003", "0.51882976", "0.51807684", "0.5180208", "0.51744485", "0.51709235", "0.51693946", "0.5166635", "0.5150243", "0.51486605", "0.514812", "0.5144973", "0.5133093", "0.51280355", "0.512684", "0.51236236", "0.51209235", "0.51031435", "0.50990075", "0.5098276", "0.5090237", "0.5090008", "0.5072186", "0.50698817", "0.5069314", "0.5061647", "0.5050946", "0.5048608", "0.50461906", "0.5044358", "0.5036744", "0.50339293", "0.503332", "0.502055", "0.5019684", "0.5019488", "0.5017071", "0.50168854", "0.5011342", "0.4996107", "0.4982483", "0.4969288", "0.49659935", "0.49628243", "0.4960333", "0.49589756", "0.49474317", "0.49335286", "0.4931512", "0.49282882", "0.4924568", "0.4923461", "0.4922363", "0.49137038", "0.4907616", "0.4903924" ]
0.0
-1
This operation is available only for replica set instances that run MongoDB 4.2 or earlier and for sharded cluster instances. Before you call this operation, make sure that the following requirements are met: If you have applied for a public endpoint for the ApsaraDB for MongoDB instance, you must call the [ReleasePublicNetworkAddress](~~67604~~) operation to release the public endpoint before you call the MigrateAvailableZone operation. Transparent data encryption (TDE) is disabled for the ApsaraDB for MongoDB instance. The source zone and the destination zone belong to the same region. A vSwitch has been created in the destination zone. This prerequisite applies only if the instance resides in a virtual private cloud (VPC). For more information about how to create a vSwitch, see [Work with vSwitches](~~65387~~).
def migrate_available_zone_with_options( self, request: dds_20151201_models.MigrateAvailableZoneRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.MigrateAvailableZoneResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.effective_time): query['EffectiveTime'] = request.effective_time if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.vswitch): query['Vswitch'] = request.vswitch if not UtilClient.is_unset(request.zone_id): query['ZoneId'] = request.zone_id req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='MigrateAvailableZone', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.MigrateAvailableZoneResponse(), self.call_api(params, req, runtime) )
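The document above is the generated SDK wrapper for the MigrateAvailableZone API described by the query. As a rough illustration of how such a wrapper might be invoked from application code, the sketch below builds a MigrateAvailableZoneRequest with the parameters the wrapper forwards (DBInstanceId, ZoneId, Vswitch, EffectiveTime) and calls the method. The import paths, endpoint, credential environment variables, and all IDs are assumptions for illustration, not values taken from this record; releasing an existing public endpoint via ReleasePublicNetworkAddress, which the query requires beforehand, is omitted here.

# Minimal usage sketch (not part of the source record). Assumes the standard
# alibabacloud_dds20151201 package layout and placeholder credentials/IDs.
import os

from alibabacloud_dds20151201.client import Client as DdsClient
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util import models as util_models


def migrate_instance_to_zone() -> None:
    # Credentials are read from environment variables here; any secure
    # credential source works equally well.
    config = open_api_models.Config(
        access_key_id=os.environ['ALIBABA_CLOUD_ACCESS_KEY_ID'],
        access_key_secret=os.environ['ALIBABA_CLOUD_ACCESS_KEY_SECRET'],
        # Assumed service endpoint; use the endpoint for your region.
        endpoint='mongodb.aliyuncs.com',
    )
    client = DdsClient(config)

    request = dds_20151201_models.MigrateAvailableZoneRequest(
        dbinstance_id='dds-bp1**********',   # placeholder instance ID
        zone_id='cn-hangzhou-h',             # placeholder destination zone ID
        vswitch='vsw-bp1**********',         # required when the instance is in a VPC
        effective_time='Immediately',        # assumed value; a maintenance-window value defers the switch
    )
    runtime = util_models.RuntimeOptions()
    response = client.migrate_available_zone_with_options(request, runtime)
    print(response.body)


if __name__ == '__main__':
    migrate_instance_to_zone()

The request field names mirror the attributes the wrapper reads (request.dbinstance_id, request.zone_id, request.vswitch, request.effective_time), so the sketch stays consistent with the generated code even though the surrounding setup is hypothetical.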
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def migrate_available_zone(\n self,\n request: dds_20151201_models.MigrateAvailableZoneRequest,\n ) -> dds_20151201_models.MigrateAvailableZoneResponse:\n runtime = util_models.RuntimeOptions()\n return self.migrate_available_zone_with_options(request, runtime)", "def test_migrate_volume_driver_cross_az(self):\n # Mock driver and rpc functions\n self.mock_object(self.volume.driver, 'migrate_volume',\n lambda x, y, z, new_type_id=None: (\n True, {'user_id': fake.USER_ID}))\n dst_az = 'AZ2'\n db.service_update(self.context, self._service.id,\n {'availability_zone': dst_az})\n\n volume = tests_utils.create_volume(self.context, size=0,\n host=CONF.host,\n migration_status='migrating')\n host_obj = {'host': 'newhost', 'capabilities': {}}\n self.volume.migrate_volume(self.context, volume, host_obj, False)\n\n # check volume properties\n volume.refresh()\n self.assertEqual('newhost', volume.host)\n self.assertEqual('success', volume.migration_status)\n self.assertEqual(dst_az, volume.availability_zone)", "def install_mongo ( vpc_conn, ec2_conn, cloudwatch_conn, vpc, base_name, aws_account_type, base_topicarn, params ):\n app_type = 'MONGO'\n mongo_keypair = get_keypair_name( aws_account_type, vpc.region.name, app_type )\n print mongo_keypair\n mongo_sec_grp_name = get_secgrp_name( base_name, app_type )\n \n print \"mongo_sec_grp_name\" + mongo_sec_grp_name\n \n mongo_sec_grp = find_secgrp(ec2_conn, mongo_sec_grp_name)\n print mongo_sec_grp\n \n if not mongo_sec_grp :\n mongo_sec_grp = create_secgrp( ec2_conn,\n vpc,\n mongo_sec_grp_name,\n 'Controls access to the ' + app_type )\n print mongo_sec_grp \n \n remove_all_rules( ec2_conn, [ mongo_sec_grp ],deep = True,base_name = base_name)\n grant_ssh_access( ec2_conn, [ mongo_sec_grp ], find_group( ec2_conn, base_name, 'NAT' ) )\n grant_grp_self_access ( ec2_conn, mongo_sec_grp, 0, 65535, protocol = 'tcp' )\n \n mongo_ami_name = base_name + '-' + app_type\n \n mongo_ami = get_ami_by_name( ec2_conn, mongo_ami_name )\n if not mongo_ami :\n mongo_ami = get_ami_by_name( ec2_conn, default_mongo_ami )\n\n print \"Creating mongoDB Instances\"\n \n mongo_subnets = get_vpc_subnets( vpc_conn, vpc, 'STATIC' )\n mongo_sec_grps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ mongo_sec_grp_name ] } )\n \n primary_ip = get_primary_node(mongo_subnets)\n mongo_instances = []\n mongo_config = get_mongo_config(mongo_subnets)\n \n user_data = None\n primary_instance_id = None\n primary_subnet = None\n for subnet in mongo_subnets :\n cidr_block = subnet.cidr_block\n \n if cidr_block == primary_ip :\n primary_subnet = subnet\n \n # First launch the secondary instances\n if cidr_block != primary_ip :\n instance_private_ip = get_static_ip(subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo )\n\n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )\n\n user_data = base64.b64encode(mongo_config)\n \n print \"Launching primary mongo instance\"\n print \"primary_subnet \" +primary_subnet.cidr_block\n instance_private_ip = 
get_static_ip(primary_subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = primary_subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo)\n \n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )", "async def migrate_available_zone_with_options_async(\n self,\n request: dds_20151201_models.MigrateAvailableZoneRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.MigrateAvailableZoneResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.effective_time):\n query['EffectiveTime'] = request.effective_time\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.vswitch):\n query['Vswitch'] = request.vswitch\n if not UtilClient.is_unset(request.zone_id):\n query['ZoneId'] = request.zone_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='MigrateAvailableZone',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.MigrateAvailableZoneResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def create_mongodb(config):\n\n \n mongo_url = \"mongodb://\"\n mongo_url += \",\".join(map(lambda srv: srv['host'] + \":\" + str(srv['port']), config['data']['mongoServers']))\n \n if 'replica' in config['data']:\n mongo_url += \"/?replicaSet={0}\".format(config['data']['replica'])\n\n client = MongoClient(mongo_url)\n\n return client", "async def migrate_available_zone_async(\n self,\n request: dds_20151201_models.MigrateAvailableZoneRequest,\n ) -> dds_20151201_models.MigrateAvailableZoneResponse:\n runtime = util_models.RuntimeOptions()\n return await self.migrate_available_zone_with_options_async(request, runtime)", "def change_zone_ip(config, section, new_ip):\n\n a_name = config.get(section, \"a_name\")\n apikey = config.get(section, \"apikey\")\n ttl = int(config.get(section, \"ttl\"))\n zone_id = get_zone_id(config, section)\n\n zone_record = {'name': a_name, 'value': new_ip, 'ttl': ttl, 'type': 'A'}\n\n new_zone_ver = api.domain.zone.version.new(apikey, zone_id)\n\n # clear old A record (defaults to previous verison's\n api.domain.zone.record.delete(apikey, zone_id, new_zone_ver,\n {'type': 'A', 'name': a_name})\n\n # Add in new A record\n api.domain.zone.record.add(apikey, zone_id, new_zone_ver, zone_record)\n\n # Set new zone version as the active zone\n api.domain.zone.version.set(apikey, zone_id, new_zone_ver)", "def configure_cluster(ctx, zone, 
db_instance_name):\n ctx.run(init_pg_servers_play_run(zone, db_instance_name), pty=True, echo=True)", "def test_mongodb_destination(sdc_builder, sdc_executor, mongodb):\n pipeline_builder = sdc_builder.get_pipeline_builder()\n pipeline_builder.add_error_stage('Discard')\n\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='TEXT', raw_data='\\n'.join(DATA))\n\n expression_evaluator = pipeline_builder.add_stage('Expression Evaluator')\n # MongoDB destination uses the CRUD operation in the sdc.operation.type record header attribute when writing\n # to MongoDB. Value 4 specified below is for UPSERT.\n expression_evaluator.header_attribute_expressions = [{'attributeToSet': 'sdc.operation.type',\n 'headerAttributeExpression': '1'}]\n\n mongodb_dest = pipeline_builder.add_stage('MongoDB', type='destination')\n mongodb_dest.set_attributes(database=get_random_string(ascii_letters, 5),\n collection=get_random_string(ascii_letters, 10))\n # From 3.6.0, unique key field is a list, otherwise single string for older version.\n mongodb_dest.unique_key_field = ['/text'] if Version(sdc_builder.version) >= Version('3.6.0') else '/text'\n\n record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')\n trash = pipeline_builder.add_stage('Trash')\n dev_raw_data_source >> record_deduplicator >> expression_evaluator >> mongodb_dest\n record_deduplicator >> trash\n pipeline = pipeline_builder.build().configure_for_environment(mongodb)\n\n try:\n # Data is generated in dev_raw_data_source and sent to MongoDB using pipeline.\n sdc_executor.add_pipeline(pipeline)\n sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(DATA))\n sdc_executor.stop_pipeline(pipeline)\n\n # Verify data is received correctly using PyMongo.\n # Similar to writing, while reading data, we specify MongoDB database and the collection inside it.\n logger.info('Verifying docs received with PyMongo...')\n assert [item['text'] for item in mongodb.engine[mongodb_dest.database][mongodb_dest.collection].find()] == DATA\n\n finally:\n logger.info('Dropping %s database...', mongodb_dest.database)\n mongodb.engine.drop_database(mongodb_dest.database)", "def test_mongodb_origin_simple(sdc_builder, sdc_executor, mongodb):\n pipeline_builder = sdc_builder.get_pipeline_builder()\n pipeline_builder.add_error_stage('Discard')\n\n mongodb_origin = pipeline_builder.add_stage('MongoDB', type='origin')\n mongodb_origin.set_attributes(capped_collection=False,\n database=get_random_string(ascii_letters, 5),\n collection=get_random_string(ascii_letters, 10))\n\n trash = pipeline_builder.add_stage('Trash')\n mongodb_origin >> trash\n pipeline = pipeline_builder.build().configure_for_environment(mongodb)\n\n try:\n # MongoDB and PyMongo add '_id' to the dictionary entries e.g. docs_in_database\n # when used for inserting in collection. Hence the deep copy.\n docs_in_database = copy.deepcopy(ORIG_DOCS)\n\n # Create documents in MongoDB using PyMongo.\n # First a database is created. 
Then a collection is created inside that database.\n # Then documents are created in that collection.\n logger.info('Adding documents into %s collection using PyMongo...', mongodb_origin.collection)\n mongodb_database = mongodb.engine[mongodb_origin.database]\n mongodb_collection = mongodb_database[mongodb_origin.collection]\n insert_list = [mongodb_collection.insert_one(doc) for doc in docs_in_database]\n assert len(insert_list) == len(docs_in_database)\n\n # Start pipeline and verify the documents using snaphot.\n sdc_executor.add_pipeline(pipeline)\n snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot\n sdc_executor.stop_pipeline(pipeline)\n rows_from_snapshot = [{record.value['value']['name']['sqpath'].lstrip('/'):\n record.value['value']['name']['value']}\n for record in snapshot[mongodb_origin].output]\n\n assert rows_from_snapshot == ORIG_DOCS\n\n finally:\n logger.info('Dropping %s database...', mongodb_origin.database)\n mongodb.engine.drop_database(mongodb_origin.database)", "def switch_availability_zone():\n global current_az\n if current_az == 0:\n current_az = 1\n else:\n current_az = 0", "def migrate_contract(network):\n print(network)", "def bind(self,cluster_name,ip_address='',bind_details={},project_id=''):\n project_id = project_id if project_id != '' else self.__project_id\n if ip_address == '':\n headers = { 'User-Agent': 'curl/7.61.0'} # spoof for simple response\n ip = requests.get('http://ifconfig.co', headers)\n ip_address = ip.text.rstrip()\n logger.info(f'bind: looked up ip address: {ip_address}')\n #key = self.create_programatic_apikey(description=description,project_id=project_id)\n db_user = { 'username' : 'foo'\n ,'password' : 'changeme'\n ,'databaseName' : 'admin'\n ,'roles' : [ {'databaseName' : 'admin', 'roleName' : 'dbAdminAnyDatabase'} ] \n }\n user = self.create_database_user(db_user,project_id=project_id) \n cluster = self.get_cluster(cluster_name)\n cs = cluster['mongoURIWithOptions'].split('/',1)\n #conn_str = f'{cs[0]//{key['publicKey']}:{key['privateKey']}@{cs[1]}'\n return conn_str", "def test_mongodb_origin_simple_with_BSONBinary(sdc_builder, sdc_executor, mongodb):\n\n ORIG_BINARY_DOCS = [\n {'data': binary.Binary(b'Binary Data Flute')},\n {'data': binary.Binary(b'Binary Data Oboe')},\n {'data': binary.Binary(b'Binary Data Violin')}\n ]\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n pipeline_builder.add_error_stage('Discard')\n\n mongodb_origin = pipeline_builder.add_stage('MongoDB', type='origin')\n mongodb_origin.set_attributes(capped_collection=False,\n database=get_random_string(ascii_letters, 5),\n collection=get_random_string(ascii_letters, 10))\n\n trash = pipeline_builder.add_stage('Trash')\n mongodb_origin >> trash\n pipeline = pipeline_builder.build().configure_for_environment(mongodb)\n\n try:\n # MongoDB and PyMongo add '_id' to the dictionary entries e.g. docs_in_database\n # when used for inserting in collection. Hence the deep copy.\n docs_in_database = copy.deepcopy(ORIG_BINARY_DOCS)\n\n # Create documents in MongoDB using PyMongo.\n # First a database is created. 
Then a collection is created inside that database.\n # Then documents are created in that collection.\n logger.info('Adding documents into %s collection using PyMongo...', mongodb_origin.collection)\n mongodb_database = mongodb.engine[mongodb_origin.database]\n mongodb_collection = mongodb_database[mongodb_origin.collection]\n insert_list = [mongodb_collection.insert_one(doc) for doc in docs_in_database]\n assert len(insert_list) == len(docs_in_database)\n\n # Start pipeline and verify the documents using snaphot.\n sdc_executor.add_pipeline(pipeline)\n snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot\n sdc_executor.stop_pipeline(pipeline)\n rows_from_snapshot = [{'data': str(record.value2['data'])} for record in snapshot[mongodb_origin].output]\n\n assert rows_from_snapshot == [{'data': str(record.get('data'))} for record in ORIG_BINARY_DOCS]\n\n finally:\n logger.info('Dropping %s database...', mongodb_origin.database)\n mongodb.engine.drop_database(mongodb_origin.database)", "def test_12_migrate_vm_live_with_snapshots_on_remote(self):\n global vm2\n # Get ROOT Volume\n vol_for_snap = list_volumes(\n self.apiclient,\n virtualmachineid=vm2.id,\n listall=True)\n for vol in vol_for_snap:\n snapshot = Snapshot.create(\n self.apiclient,\n volume_id=vol.id\n )\n snapshot.validateState(\n self.apiclient,\n snapshotstate=\"backedup\",\n )\n # Migrate all volumes and VMs\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm2 = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_2, destinationHost)\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)", "def migrate_database():\n log('Migrating the keystone database.', level=INFO)\n service_stop(keystone_service())\n # NOTE(jamespage) > icehouse creates a log file as root so use\n # sudo to execute as keystone otherwise keystone won't start\n # afterwards.\n cmd = ['sudo', '-u', 'keystone', 'keystone-manage', 'db_sync']\n subprocess.check_output(cmd)\n service_start(keystone_service())\n time.sleep(10)\n peer_store('db-initialised', 'True')", "def connect_and_update(_id, padding, host, port, dbname, collname,\n updates_per_process, process_number, replica_set):\n client = MongoClient(host=[get_hostport_string(host=host, port=port)],\n replicaset=replica_set)\n db = client[dbname]\n collection = db[collname]\n try: # Unless using multiple docs, most of these will fail\n collection.insert_one({\"_id\": _id, \"padding\": padding})\n except:\n pass\n\n for j in xrange(updates_per_process):\n update_document(_id, collection, padding, process_number)\n \n client.close()", "def migrate_to_other_zone(\n self,\n request: dds_20151201_models.MigrateToOtherZoneRequest,\n ) -> dds_20151201_models.MigrateToOtherZoneResponse:\n runtime = util_models.RuntimeOptions()\n return self.migrate_to_other_zone_with_options(request, runtime)", "def test_transform_and_load_vpcs(neo4j_session):\n vpc_res = tests.data.gcp.compute.VPC_RESPONSE\n vpc_list = cartography.intel.gcp.compute.transform_gcp_vpcs(vpc_res)\n cartography.intel.gcp.compute.load_gcp_vpcs(neo4j_session, vpc_list, TEST_UPDATE_TAG)\n\n query = \"\"\"\n MATCH(vpc:GCPVpc{id:$VpcId})\n RETURN vpc.id, vpc.partial_uri, vpc.auto_create_subnetworks\n \"\"\"\n expected_vpc_id = 
'projects/project-abc/global/networks/default'\n nodes = neo4j_session.run(\n query,\n VpcId=expected_vpc_id,\n )\n actual_nodes = {(n['vpc.id'], n['vpc.partial_uri'], n['vpc.auto_create_subnetworks']) for n in nodes}\n expected_nodes = {\n (expected_vpc_id, expected_vpc_id, True),\n }\n assert actual_nodes == expected_nodes", "def to_network_v4(zone: Zone) -> ipaddress.IPv4Network:\n\n labels = zone.name.split(\".\")[:-3]\n netmask: int = 8 * len(labels)\n offset = 4 - len(labels)\n\n pattern = r\"^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)([/-](2[5-9]|3[0-1]))?$\"\n last_label_parsed = re.search(pattern, labels[0])\n if not last_label_parsed:\n raise ValueError(\"Faild to parse the zone name\")\n\n if last_label_parsed[2]:\n # non-octet boundary delegation detected\n # remove netmask and save it to the result\n last_octect = last_label_parsed[1]\n labels[0] = last_octect\n netmask = int(last_label_parsed[2][1:])\n\n labels = [\"0\"] * offset + labels\n prefix_str = \".\".join(reversed(labels))\n prefix_str += f\"/{netmask}\"\n\n return ipaddress.IPv4Network(prefix_str, strict=True)", "def _resolve_shard(client):\n status = client.admin.command('serverStatus')\n if status['process'] == 'mongos':\n raise RuntimeError(\"Destination cannot be mongos\")\n return client", "def migrateVirtualMachine(self,node,vmid,target,online=False,force=False):\n post_data = {'target': str(target)}\n if online:\n post_data['online'] = '1'\n if force:\n post_data['force'] = '1'\n data = self.connect('post',\"nodes/%s/qemu/%s/migrate\" % (node,vmid), post_data)\n return data", "def setup_source_db(self):\n conn = MongoReplicaSetClient(host=self._source_host,\n replicaSet=self._replica_set,\n read_preference=ReadPreference.PRIMARY)\n conn['admin'].authenticate(self._user, self._password)\n return conn", "def change_address(\n vm_hostname, new_address,\n offline=False, migrate=False, allow_reserved_hv=False,\n offline_transport='drbd',\n):\n\n if not offline:\n raise IGVMError('IP address change can be only performed offline')\n\n with _get_vm(vm_hostname) as vm:\n if vm.dataset_obj['datacenter_type'] != 'kvm.dct':\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n new_address = ip_address(new_address)\n\n if vm.dataset_obj['intern_ip'] == new_address:\n raise ConfigError('New IP address is the same as the old one!')\n\n if not vm.hypervisor.get_vlan_network(new_address) and not migrate:\n err = 'Current hypervisor does not support new subnet!'\n raise ConfigError(err)\n\n new_network = Query(\n {\n 'servertype': 'route_network',\n 'state': 'online',\n 'network_type': 'internal',\n 'intern_ip': Contains(new_address),\n }\n ).get()['hostname']\n\n vm_was_running = vm.is_running()\n\n with Transaction() as transaction:\n if vm_was_running:\n vm.shutdown(\n transaction=transaction,\n check_vm_up_on_transaction=False,\n )\n vm.change_address(\n new_address, new_network, transaction=transaction,\n )\n\n if migrate:\n vm_migrate(\n vm_object=vm,\n run_puppet=True,\n offline=True,\n no_shutdown=True,\n allow_reserved_hv=allow_reserved_hv,\n offline_transport=offline_transport,\n )\n else:\n vm.hypervisor.mount_vm_storage(vm, transaction=transaction)\n vm.run_puppet()\n vm.hypervisor.redefine_vm(vm)\n vm.hypervisor.umount_vm_storage(vm)\n\n if vm_was_running:\n vm.start()", "def migrate_replica(replica, location, noRemove=False, mirror=False):\n\n from tardis.tardis_portal.models import Replica, Location\n\n with transaction.commit_on_success():\n 
replica = Replica.objects.select_for_update().get(pk=replica.pk)\n source = Location.get_location(replica.location.name)\n\n if not replica.verified or location.provider.trust_length:\n raise MigrationError('Only verified datafiles can be migrated' \\\n ' to this destination')\n\n filename = replica.get_absolute_filepath()\n try:\n newreplica = Replica.objects.get(datafile=replica.datafile,\n location=location)\n created_replica = False\n # We've most likely mirrored this file previously. But if\n # we are about to delete the source Replica, we need to check\n # that the target Replica still verifies.\n if not mirror and not check_file_transferred(newreplica, location):\n raise MigrationError('Previously mirrored / migrated Replica' \\\n ' no longer verifies locally!')\n except Replica.DoesNotExist:\n newreplica = Replica()\n newreplica.location = location\n newreplica.datafile = replica.datafile\n newreplica.protocol = ''\n newreplica.stay_remote = location != Location.get_default_location()\n newreplica.verified = False\n url = location.provider.generate_url(newreplica)\n\n if newreplica.url == url:\n # We should get here ...\n raise MigrationError('Cannot migrate a replica to its' \\\n ' current location')\n newreplica.url = url\n location.provider.put_file(replica, newreplica)\n verified = False\n try:\n verified = check_file_transferred(newreplica, location)\n except:\n # FIXME - should we always do this?\n location.provider.remove_file(newreplica)\n raise\n\n newreplica.verified = verified\n newreplica.save()\n logger.info('Transferred file %s for replica %s' %\n (filename, replica.id))\n created_replica = True\n\n if mirror:\n return created_replica\n\n # FIXME - do this more reliably ...\n replica.delete()\n if not noRemove:\n source.provider.remove_file(replica)\n logger.info('Removed local file %s for replica %s' %\n (filename, replica.id))\n return True", "def connect_to_mongo(self, host='127.0.0.1', port=27017, instance='local'):\n if instance == 'prod':\n logging.info('connecting to mongo Atlas')\n self.db_client = MongoClient('mongodb+srv://{}:{}@{}/'\n '{}?retryWrites=true&w=majority'.format(self.config['db']['username'],\n self.config['db']['password'],\n self.config['db']['atlas'],\n self.db_name))\n else:\n logging.info('connecting to local Atlas')\n self.db_client = MongoClient(host, port)", "def move_ips_to_interface(apps, schema_editor):\n UserAS = apps.get_model('scionlab', 'UserAS')\n\n for useras in UserAS.objects.iterator():\n # UserASes have a unique host and before the multi-AP feature had a unique interface\n host = useras.hosts.get()\n iface = useras.interfaces.get()\n if not iface.public_ip:\n iface.public_ip = host.public_ip\n iface.bind_ip = host.bind_ip\n iface.save()\n host.public_ip = None\n host.bind_ip = None\n host.save()", "def upgrade_to_2():\n\n def update_file_origins(cont_list, cont_name):\n for container in cont_list:\n updated_files = []\n for file in container.get('files', []):\n origin = file.get('origin')\n if origin is not None:\n if origin.get('name', None) is None:\n file['origin']['name'] = origin['id']\n if origin.get('method', None) is None:\n file['origin']['method'] = ''\n updated_files.append(file)\n\n query = {'_id': container['_id']}\n update = {'$set': {'files': updated_files}}\n result = config.db[cont_name].update_one(query, update)\n\n query = {'$and':[{'files.origin.name': { '$exists': False}}, {'files.origin.id': { '$exists': True}}]}\n\n update_file_origins(config.db.collections.find(query), 'collections')\n 
update_file_origins(config.db.projects.find(query), 'projects')\n update_file_origins(config.db.sessions.find(query), 'sessions')\n update_file_origins(config.db.acquisitions.find(query), 'acquisitions')", "def try_atlas():\n result = {}\n try:\n vcap_services = os.getenv('VCAP_SERVICES')\n services = json.loads(vcap_services)\n for service_name in services.keys():\n print(f'service_name={service_name}')\n if service_name == \"_\":\n continue\n credentials = load_from_vcap_services(service_name)\n result.update(credentials)\n except Exception as err:\n print( f'Error looking for VCAP_SERVICES {err}')\n result['error']=err\n return result\n\n mongo_results = {}\n try:\n db = MongoClient( result['connectionString'] )\n mongo_results[\"MongoClient\"]= f'{db}'\n mongo_results[\"server_info\"]=db.server_info()\n except Exception as err:\n print( f'Error trying connection to Atlas: {err}')\n result['atlas-error']=err\n finally:\n result['mongo']=mongo_results\n\n return result", "def __init__(self, dst_mongodb_uri, dst_database, dst_collection, dry_run):\n self.client = pymongo.MongoClient(dst_mongodb_uri)\n self.dst_mongodb_uri = dst_mongodb_uri\n self.lookup_col = self.client[dst_database][dst_collection]\n self.dry_run = dry_run", "def test_transform_and_load_gcp_instances_and_nics(neo4j_session):\n instance_responses = [tests.data.gcp.compute.GCP_LIST_INSTANCES_RESPONSE]\n instance_list = cartography.intel.gcp.compute.transform_gcp_instances(instance_responses)\n cartography.intel.gcp.compute.load_gcp_instances(neo4j_session, instance_list, TEST_UPDATE_TAG)\n\n instance_id1 = 'projects/project-abc/zones/europe-west2-b/instances/instance-1-test'\n instance_id2 = 'projects/project-abc/zones/europe-west2-b/instances/instance-1'\n\n nic_query = \"\"\"\n MATCH(i:GCPInstance)-[r:NETWORK_INTERFACE]->(nic:GCPNetworkInterface)\n OPTIONAL MATCH (i)-[:TAGGED]->(t:GCPNetworkTag)\n RETURN i.id, i.zone_name, i.project_id, i.hostname, t.value, r.lastupdated, nic.nic_id, nic.private_ip\n \"\"\"\n objects = neo4j_session.run(nic_query)\n actual_nodes = {\n (\n o['i.id'],\n o['i.zone_name'],\n o['i.project_id'],\n o['nic.nic_id'],\n o['nic.private_ip'],\n o['t.value'],\n o['r.lastupdated'],\n ) for o in objects\n }\n\n expected_nodes = {\n (\n instance_id1,\n 'europe-west2-b',\n 'project-abc',\n 'projects/project-abc/zones/europe-west2-b/instances/instance-1-test/networkinterfaces/nic0',\n '10.0.0.3',\n None,\n TEST_UPDATE_TAG,\n ),\n (\n instance_id2,\n 'europe-west2-b',\n 'project-abc',\n 'projects/project-abc/zones/europe-west2-b/instances/instance-1/networkinterfaces/nic0',\n '10.0.0.2',\n 'test',\n TEST_UPDATE_TAG,\n ),\n }\n assert actual_nodes == expected_nodes", "def mongoRestore( self, db, infile ):\n\t\tsys_command = \"mongorestore --db \" + db + \" --host \" + self.host + \" --port \" + str( self.port ) + \" \" + infile \n\t\tos.system(sys_command)", "def test_migrate_volume_generic_cross_az(self, migrate_volume_completion,\n nova_api):\n original_create = objects.Volume.create\n dst_az = 'AZ2'\n db.service_update(self.context, self._service.id,\n {'availability_zone': dst_az})\n\n def my_create(self, *args, **kwargs):\n self.status = 'available'\n original_create(self, *args, **kwargs)\n\n volume = tests_utils.create_volume(self.context, size=1,\n host=CONF.host)\n\n host_obj = {'host': 'newhost', 'capabilities': {}}\n create_vol = self.patch('cinder.objects.Volume.create',\n side_effect=my_create, autospec=True)\n\n with mock.patch.object(self.volume, '_copy_volume_data') as copy_mock:\n 
self.volume._migrate_volume_generic(self.context, volume, host_obj,\n None)\n copy_mock.assert_called_with(self.context, volume, mock.ANY,\n remote='dest')\n migrate_volume_completion.assert_called_with(\n self.context, volume, mock.ANY, error=False)\n\n nova_api.return_value.update_server_volume.assert_not_called()\n\n self.assertEqual(dst_az,\n create_vol.call_args[0][0]['availability_zone'])", "def migrate_to_other_zone_with_options(\n self,\n request: dds_20151201_models.MigrateToOtherZoneRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.MigrateToOtherZoneResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.effective_time):\n query['EffectiveTime'] = request.effective_time\n if not UtilClient.is_unset(request.instance_id):\n query['InstanceId'] = request.instance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.v_switch_id):\n query['VSwitchId'] = request.v_switch_id\n if not UtilClient.is_unset(request.zone_id):\n query['ZoneId'] = request.zone_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='MigrateToOtherZone',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.MigrateToOtherZoneResponse(),\n self.call_api(params, req, runtime)\n )", "def test_vm_migration_across_hosts(self):\n\n # Create security group for the server\n group_create_body_update, _ = self._create_security_group()\n\n # Create server with security group\n name = data_utils.rand_name('server-with-security-group')\n server_id = self._create_server_with_sec_group(\n name, self.network['id'],\n group_create_body_update['security_group']['id'])\n self.assertTrue(self.verify_portgroup(self.network['id'], server_id))\n device_port = self.ports_client.list_ports(device_id=server_id)\n port_id = device_port['ports'][0]['id']\n floating_ip = self._associate_floating_ips(port_id=port_id)\n self.assertTrue(self.ping_ip_address(\n floating_ip['floatingip']['floating_ip_address'],\n should_succeed=False))\n\n # Update security group rule for the existing security group\n self.security_group_rules_client.create_security_group_rule(\n security_group_id=group_create_body_update['security_group']['id'],\n protocol='icmp',\n direction='ingress',\n ethertype=self.ethertype\n )\n self.assertTrue(self.ping_ip_address(\n floating_ip['floatingip']['floating_ip_address'],\n should_succeed=True))\n cluster = cfg.CONF.VCENTER.cluster_in_use\n content = self._create_connection()\n host_dic = self._get_host_name(server_id)\n vm_host = host_dic['host_name']\n vm_host_ip = vm_host.name\n cluster_hosts = self._get_hosts_for_cluster(content, cluster)\n if len(cluster_hosts.host) < 2:\n msg = \"Min two hosts needed in cluster for Vmotion\"\n raise testtools.TestCase.skipException(msg)\n for host in cluster_hosts.host:\n if host.name != vm_host_ip:\n dest_host = host\n # Live Migration\n task = self._migrate_vm(content, server_id, 
dest_host)\n self._wait_for_task(task, content)\n self.assertTrue(self.ping_ip_address(\n floating_ip['floatingip']['floating_ip_address'],\n should_succeed=True))", "def setup_target_db(self):\n conn = MongoClient(host=self._target_host)\n conn['admin'].authenticate(self._user, self._password)\n return conn", "def dvs_remote_ip_prefix(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n self.show_step(2)\n net_1 = os_conn.create_network(network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network are created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network.\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n self.show_step(3)\n self.show_step(4)\n self.show_step(5)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n access_point_1, access_point_ip_1 = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n security_groups=[security_group.name, sg1.name])\n\n access_point_2, access_point_ip_2 = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n security_groups=[security_group.name, sg2.name])\n\n self.show_step(6)\n _sg_rules = os_conn.neutron.list_security_group_rules()\n sg_rules = [sg_rule for sg_rule in _sg_rules['security_group_rules']\n if sg_rule['security_group_id'] in [sg1.id, sg2.id]]\n for rule in sg_rules:\n os_conn.neutron.delete_security_group_rule(rule['id'])\n\n self.show_step(7)\n for rule in [self.icmp, self.tcp]:\n rule[\"security_group_rule\"][\"security_group_id\"] = sg1.id\n rule[\"security_group_rule\"][\"remote_ip_prefix\"] = access_point_ip_1\n rule[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(rule)\n rule[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(rule)\n\n # Get private ip of access_point_2\n private_ip = os_conn.get_nova_instance_ip(access_point_2,\n net_name=net_1['name'])\n\n self.show_step(8)\n self.tcp[\"security_group_rule\"][\"security_group_id\"] = sg2.id\n self.tcp[\"security_group_rule\"][\"remote_ip_prefix\"] = private_ip\n os_conn.neutron.create_security_group_rule(self.tcp)\n self.tcp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n\n self.show_step(9)\n istances_sg1 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[sg1.name])\n\n istances_sg2 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[sg2.name])\n openstack.verify_instance_state(os_conn)\n\n # Get private ips of instances\n ips = {\n 
'SG1': [os_conn.assign_floating_ip(i).ip for i in istances_sg1],\n 'SG2': [os_conn.get_nova_instance_ip(i, net_name=net_1['name'])\n for i in istances_sg2]\n }\n\n self.show_step(10)\n ip_pair = dict.fromkeys(ips['SG1'])\n for key in ip_pair:\n ip_pair[key] = [access_point_ip_1]\n openstack.check_connection_through_host(access_point_ip_1,\n ip_pair,\n timeout=60 * 4)\n\n self.show_step(11)\n ip_pair = dict.fromkeys(ips['SG1'])\n for key in ip_pair:\n ip_pair[key] = [value for value in ips['SG1'] if key != value]\n openstack.check_connection_through_host(\n access_point_ip_1, ip_pair, result_of_command=1)\n\n self.show_step(12)\n self.show_step(13)\n ip_pair = dict.fromkeys(ips['SG2'])\n for key in ip_pair:\n ip_pair[key] = [value for value in ips['SG2'] if key != value]\n openstack.check_connection_through_host(\n access_point_ip_2, ip_pair, result_of_command=1)", "def get_mongo_config ( subnets ) :\n replications = \"\"\n primary_ip = get_primary_node(subnets)\n for subnet in subnets :\n if primary_ip != subnet.cidr_block :\n replication = get_static_ip(subnet.cidr_block, \"0/24\", mongo_ip_block)\n replication = replication.replace(\".\", \"-\")\n replications = replications + \"\\nrs.add(\\\"ip-\"+replication+\":27017\\\");\"\n \n \n return \"\"\"#!/bin/bash -ex\n exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1\n:>/etc/replication.js\necho 'rs.initiate();\"\"\"+replications+\"\"\"\n\n'>>/etc/replication.js\n\"\"\"", "def upgrade_to_10():\n\n def switch_keys(doc, x, y):\n doc[y] = doc[x]\n doc.pop(x, None)\n\n\n jobs = config.db.jobs.find({'destination.container_type': {'$exists': True}})\n\n for job in jobs:\n switch_keys(job, 'algorithm_id', 'name')\n\n for key in job['inputs'].keys():\n inp = job['inputs'][key]\n\n switch_keys(inp, 'container_type', 'type')\n switch_keys(inp, 'container_id', 'id')\n switch_keys(inp, 'filename', 'name')\n\n\n dest = job['destination']\n switch_keys(dest, 'container_type', 'type')\n switch_keys(dest, 'container_id', 'id')\n\n config.db.jobs.update(\n {'_id': job['_id']},\n job\n )", "def mongo_connect(\n password,\n user='RCD2021',\n dbname='myFirstDatabase'\n):\n\n client = pymongo.MongoClient(f\"mongodb+srv://{user}:{password}\\\n@cluster0.z3tac.mongodb.net/{dbname}?retryWrites=true&w=majority\")\n db = client.test\n\n return db", "def convert_container_to_replica(\n self,\n replica_name: str,\n active_container: docker.models.containers.Container,\n passive_container: docker.models.containers.Container) -> list[docker.models.images.Image]:\n new_replica_name = self.sanitize_replica_name(replica_name)\n replica_list = []\n container_list = [\n active_container, passive_container] if passive_container else [active_container]\n\n logger.info(\n f'Creating new replica image with name {new_replica_name}...')\n\n for container in container_list:\n try:\n self.client.images.remove(new_replica_name, force=True)\n except docker.errors.ImageNotFound:\n pass\n\n container_arch = container.name.split('_')[-1]\n\n # commit with arch tag\n replica = container.commit(\n repository=new_replica_name, tag=container_arch)\n replica_list.append(replica)\n\n logger.info(\n f'Replica image {replica.tags[0]} created. 
Cleaning up...')\n self.remove_container(container.name)\n\n for replica in replica_list:\n if replica.attrs.get('Architecture') == LOCAL_ARCHITECTURE:\n local_arch_replica = replica\n local_arch_replica.tag(\n repository=new_replica_name, tag='latest')\n\n # this is done due to how recomitting existing image is not reflected in 'replica_list' var\n actual_replica_list = self.client.images.list(new_replica_name)\n\n return actual_replica_list", "def restore_cluster(ctx, zone, db_instance, from_zone=None, from_db_instance=None, backup_folder=None, target_time=None):\n\n if from_zone == None:\n from_zone = zone\n if from_db_instance == None:\n from_db_instance = db_instance\n if backup_folder == None:\n get_env('AWS_SECRET_ACCESS_KEY', 'to list the backup buckets at AWS S3.')\n get_env('AWS_ACCESS_KEY_ID', 'to list the backup buckets at AWS S3.')\n get_env('AWS_REGION', 'to list the backup buckets at AWS S3.')\n print(\"Available values for --backup-folder :\\n\")\n res = ctx.run(\"aws s3 ls \" + backup_bucket_name(from_zone, from_db_instance), pty=True, hide=\"stdout\")\n for line in res.stdout.splitlines():\n print(re.search(\"PRE ([^ /]+)\", line).group(1))\n else:\n recover_from = \"{}/{}\".format(backup_bucket_name(from_zone, from_db_instance), backup_folder)\n print(\"\"\"\n Starting recovery\n \"\"\")\n more_vars = {'recover_from': recover_from}\n if target_time:\n more_vars['recovery_target_time'] = '\"{}\"'.format(target_time) # need quoting due to space char\n\n ctx.run(init_pg_servers_play_run(zone, db_instance, more_vars=more_vars), pty=True, echo=True)", "def _activate_new_zone(self):\n if ((not hasattr(self, '_current_zone')) or (not self._current_zone)) or ((not hasattr(self, '_new_zone_version_number')) or (not self._new_zone_version_number)):\n raise GandiApiException(\"Can't update record, no cloned zone available.\")\n success = self._api.domain.zone.version.set(self._api_key, self._current_zone['id'], \n self._new_zone_version_number)\n if not success:\n raise GandiApiException('Failed to activate new zone;')\n else:\n logging.info('New zone version activated.')", "def init_mongo_db():\n try:\n app.mongo.cx.server_info()\n app.mongo.db = app.mongo.cx[\"kamistudio\"]\n if \"kami_corpora\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_corpora\")\n app.mongo.db.kami_corpora.create_index(\"id\", unique=True)\n\n if \"kami_models\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_models\")\n app.mongo.db.kami_models.create_index(\"id\", unique=True)\n\n if \"kami_definitions\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_definitions\")\n app.mongo.db.kami_definitions.create_index(\"id\", unique=True)\n\n if \"kami_new_definitions\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_new_definitions\")\n\n except ServerSelectionTimeoutError as e:\n app.mongo.db = None", "def test_deploy_instance_with_new_network(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 252\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr)", "def test_relic():\n mongo_db = pymongo.MongoClient()\n init_db(mongo_db.roguesim_python)\n populate_db(mongo_db.roguesim_python)", "def test_07_migrate_vm_live_with_snapshots(self):\n global vm\n # Get ROOT 
Volume\n vol_for_snap = list_volumes(\n self.apiclient,\n virtualmachineid=vm.id,\n listall=True)\n for vol in vol_for_snap:\n snapshot = Snapshot.create(\n self.apiclient,\n volume_id=vol.id\n )\n snapshot.validateState(\n self.apiclient,\n snapshotstate=\"backedup\",\n )\n # Migrate all volumes and VMs\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm, self.host)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_1, destinationHost)\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm, self.host)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)", "def load_mongo_configuration(ec2_conn,base_name,params ):\n print \"loading mongo configurings\"\n \n ## Allow security from build server to mongodb\n app_type = 'MONGO'\n \n mongo_sec_grp_name = get_secgrp_name( base_name, app_type )\n mongo_sec_grp = find_secgrp(ec2_conn, mongo_sec_grp_name)\n \n try :\n mongo_sec_grp.authorize( ip_protocol = \"tcp\",\n from_port = 27017,\n to_port = 27017,\n cidr_ip = build_server_cidr )\n except :\n print \"rule exists aready\" \n \n mongo_host = params.get( 'host' )\n mongo_port = params.get( 'port' )\n mongo_username = params.get( 'user-name' )\n mongo_password = params.get( 'password' )\n \n db_name = params.get( 'db_name' )\n collection_name = params.get( 'collection_name' )\n \n documents = params.get( 'documents' )\n \n uri = \"\"\n if len( mongo_username ) > 0 :\n uri = \"mongodb://\"+mongo_username+\":\"+mongo_password+\"@\"+mongo_host+\":\"+mongo_port+\"/\"\n else :\n uri = \"mongodb://\"+mongo_host+\":\"+mongo_port+\"/\"\n \n print \"Mongo Connect URL:\" +uri\n \n \n client = MongoClient(uri)\n \n\n db = client[db_name]\n collection = db[collection_name ]\n \n collection.remove()\n \n for document in documents :\n document = json.dumps(document)\n document = loads(document)\n collection.insert(document)\n document['createdTime'] = datetime.datetime.utcnow()\n collection.save(document)\n \n ## At the end revoke the build server rule \n try :\n mongo_sec_grp.revoke( ip_protocol = \"tcp\",\n from_port = 27017,\n to_port = 27017,\n cidr_ip = build_server_cidr)\n \n except :\n print \"exception removing rule\"\n \n print \"configured\"", "def dns_sync(self, args):\r\n dns = DNSManager(self.client)\r\n vsi = VSManager(self.client)\r\n\r\n vs_id = resolve_id(vsi.resolve_ids, args.get('<identifier>'), 'VS')\r\n instance = vsi.get_instance(vs_id)\r\n zone_id = resolve_id(dns.resolve_ids, instance['domain'], name='zone')\r\n\r\n def sync_a_record():\r\n \"\"\" Sync A record \"\"\"\r\n records = dns.get_records(\r\n zone_id,\r\n host=instance['hostname'],\r\n )\r\n\r\n if not records:\r\n # don't have a record, lets add one to the base zone\r\n dns.create_record(\r\n zone['id'],\r\n instance['hostname'],\r\n 'a',\r\n instance['primaryIpAddress'],\r\n ttl=args['--ttl'])\r\n else:\r\n recs = [x for x in records if x['type'].lower() == 'a']\r\n if len(recs) != 1:\r\n raise CLIAbort(\"Aborting A record sync, found %d \"\r\n \"A record exists!\" % len(recs))\r\n rec = recs[0]\r\n rec['data'] = instance['primaryIpAddress']\r\n rec['ttl'] = args['--ttl']\r\n dns.edit_record(rec)\r\n\r\n def sync_ptr_record():\r\n \"\"\" Sync PTR record \"\"\"\r\n host_rec = instance['primaryIpAddress'].split('.')[-1]\r\n ptr_domains = self.client['Virtual_Guest'].\\\r\n getReverseDomainRecords(id=instance['id'])[0]\r\n 
edit_ptr = None\r\n for ptr in ptr_domains['resourceRecords']:\r\n if ptr['host'] == host_rec:\r\n ptr['ttl'] = args['--ttl']\r\n edit_ptr = ptr\r\n break\r\n\r\n if edit_ptr:\r\n edit_ptr['data'] = instance['fullyQualifiedDomainName']\r\n dns.edit_record(edit_ptr)\r\n else:\r\n dns.create_record(\r\n ptr_domains['id'],\r\n host_rec,\r\n 'ptr',\r\n instance['fullyQualifiedDomainName'],\r\n ttl=args['--ttl'])\r\n\r\n if not instance['primaryIpAddress']:\r\n raise CLIAbort('No primary IP address associated with this VS')\r\n\r\n zone = dns.get_zone(zone_id)\r\n\r\n go_for_it = args['--really'] or confirm(\r\n \"Attempt to update DNS records for %s\"\r\n % instance['fullyQualifiedDomainName'])\r\n\r\n if not go_for_it:\r\n raise CLIAbort(\"Aborting DNS sync\")\r\n\r\n both = False\r\n if not args['--ptr'] and not args['-a']:\r\n both = True\r\n\r\n if both or args['-a']:\r\n sync_a_record()\r\n\r\n if both or args['--ptr']:\r\n sync_ptr_record()", "def test_06_migrate_vm_live_attach_disk(self):\n \n global vm\n global data_disk_1\n data_disk_1 = self.helper.create_custom_disk(\n self.apiclient,\n {\"diskname\":\"StorPoolDisk\" },\n zoneid=self.zone.id,\n size = 5,\n miniops = 2000,\n maxiops = 5000,\n account=self.account.name,\n domainid=self.account.domainid,\n diskofferingid=self.disk_offerings.id,\n )\n\n self.debug(\"Created volume with ID: %s\" % data_disk_1.id)\n\n self.virtual_machine_live_migration_1.attach_volume(\n self.apiclient,\n data_disk_1\n )\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm, self.host)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_1, destinationHost)\n\n\n self.virtual_machine_live_migration_1.attach_volume(\n self.apiclient,\n self.volume\n )\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient,vm, self.host)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_1, destinationHost)\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient,vm, self.host)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)", "def igraph2mongo(graph,collection,mode='OUT',overwrite = False):\r\n for i in graph.vs:\r\n if not list(collection.find({'_id':i.index})):\r\n post = {\"_id\": i.index,\r\n \"neighbors_{}\".format(mode):list(set(graph.neighbors(i.index,mode=mode)))}\r\n post_id = collection.insert_one(post).inserted_id\r\n print( \"node \",post_id,\" added\")\r\n elif overwrite == True:\r\n post = {\"_id\": i.index,\r\n \"neighbors_{}\".format(mode):list(set(graph.neighbors(i.index,mode=mode)))}\r\n collection.replace_one({'_id':i.index},post)\r\n print(\"node \",i.index,\" replaced\")\r\n else:\r\n# print(\"THIS object has the _id\",i.index,list(collection.find({'_id':i.index})))\r\n pass\r\n if overwrite == True:\r\n print(collection, \"has been changed\")", "def swapdb(self, *args, **kwargs) -> NoReturn:\n raise RedisClusterException(\"SWAPDB is not supported in cluster mode\")", "def test_13_migrate_vm_live_resize_volume_on_remote(self):\n global vm2\n global data_disk_2\n\n vol = self.helper.resize_volume(apiclient = self.apiclient, volume = data_disk_1, shrinkOk = False, maxiops = 15000)\n\n # Migrate all volumes and VMs\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, 
self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm2 = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_2, destinationHost)\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)", "def create_database():\n\n try:\n client = MongoClient(MONGO_URI,event_listeners=[CommandLogger()])\n db = client.get_database('UNSD')\n\n coll_ebal = db.get_collection('ebal')\n coll_unfcc = db.get_collection('unfcc')\n\n df_ebal = pd.read_csv(EBAL_FILE)\n df_unfcc = pd.read_csv(UNFCC_FILE)\n df_ebal = decoding_codes(df_ebal)\n\n coco_dict = {}\n for i in df_ebal[\"REF_AREA\"].unique():\n # if i not in coco_dict:\n coco_dict[i] = coco.convert(i, to='iso3')\n coco_dict[\"France-Monaco\"] = coco.convert(\"France\", to='iso3')\n coco_dict[\"Italy-San Marino\"] = coco.convert(\"Italy\", to='iso3')\n coco_dict[\"Switzerland-Liechtenstein\"] = coco.convert(\"Switzerland\", to='iso3')\n df_ebal[\"REF_AREA\"] = [coco_dict[i] for i in df_ebal[\"REF_AREA\"]]\n\n data_json_unfcc = json.loads(df_unfcc.to_json(orient='records'))\n data_json_ebal = json.loads(df_ebal.to_json(orient='records'))\n\n\n result = coll_ebal.insert_many(data_json_ebal)\n logger.info('Inserted a total of {} records in EBAL'.format(len(result.inserted_ids)))\n result = coll_unfcc.insert_many(data_json_unfcc)\n logger.info('Inserted a total of {} records in UNFCC'.format(len(result.inserted_ids)))\n\n except pymongo.errors.ConnectionFailure as e:\n logger.error('PyMongo error ConnectionFailure seen: ' + str(e))\n traceback.print_exc(file = sys.stdout)\n\n finally:\n client.close()", "def _regrid_ak_ext_ana_pcp_stage4(supplemental_precip, config_options, wrf_hydro_geo_meta, mpi_config):\n\n # If the expected file is missing, this means we are allowing missing files, simply\n # exit out of this routine as the regridded fields have already been set to NDV.\n if not os.path.exists(supplemental_precip.file_in1):\n return\n\n # Check to see if the regrid complete flag for this\n # output time step is true. This entails the necessary\n # inputs have already been regridded and we can move on.\n if supplemental_precip.regridComplete:\n if mpi_config.rank == 0:\n config_options.statusMsg = \"No StageIV regridding required for this timestep.\"\n err_handler.log_msg(config_options, mpi_config)\n return\n\n # Create a path for a temporary NetCDF files that will\n # be created through the wgrib2 process.\n stage4_tmp_nc = config_options.scratch_dir + \"/STAGEIV_TMP-{}.nc\".format(mkfilename())\n\n lat_var = \"latitude\"\n lon_var = \"longitude\"\n\n if supplemental_precip.fileType != NETCDF:\n # This file shouldn't exist.... 
but if it does (previously failed\n # execution of the program), remove it.....\n if mpi_config.rank == 0:\n if os.path.isfile(stage4_tmp_nc):\n config_options.statusMsg = \"Found old temporary file: \" + stage4_tmp_nc + \" - Removing.....\"\n err_handler.log_warning(config_options, mpi_config)\n try:\n os.remove(stage4_tmp_nc)\n except OSError:\n config_options.errMsg = f\"Unable to remove temporary file: {stage4_tmp_nc}\"\n err_handler.log_critical(config_options, mpi_config)\n err_handler.check_program_status(config_options, mpi_config)\n\n # Create a temporary NetCDF file from the GRIB2 file.\n cmd = f'$WGRIB2 -match \"APCP:surface:0-6 hour acc fcst\" {supplemental_precip.file_in2} -netcdf {stage4_tmp_nc}'\n if mpi_config.rank == 0:\n config_options.statusMsg = f\"WGRIB2 command: {cmd}\"\n err_handler.log_msg(config_options, mpi_config)\n id_tmp = ioMod.open_grib2(supplemental_precip.file_in2, stage4_tmp_nc, cmd,\n config_options, mpi_config, inputVar=None)\n err_handler.check_program_status(config_options, mpi_config)\n else:\n create_link(\"STAGEIV-PCP\", supplemental_precip.file_in2, stage4_tmp_nc, config_options, mpi_config)\n id_tmp = ioMod.open_netcdf_forcing(stage4_tmp_nc, config_options, mpi_config, False, lat_var, lon_var)\n\n # Check to see if we need to calculate regridding weights.\n calc_regrid_flag = check_supp_pcp_regrid_status(id_tmp, supplemental_precip, config_options,\n wrf_hydro_geo_meta, mpi_config)\n err_handler.check_program_status(config_options, mpi_config)\n\n if calc_regrid_flag:\n if mpi_config.rank == 0:\n config_options.statusMsg = \"Calculating STAGE IV regridding weights.\"\n err_handler.log_msg(config_options, mpi_config)\n calculate_supp_pcp_weights(supplemental_precip, id_tmp, stage4_tmp_nc, config_options, mpi_config, lat_var, lon_var)\n err_handler.check_program_status(config_options, mpi_config)\n\n # Regrid the input variables.\n var_tmp = None\n if mpi_config.rank == 0:\n if mpi_config.rank == 0:\n config_options.statusMsg = f\"Regridding STAGE IV '{supplemental_precip.netcdf_var_names[-1]}' Precipitation.\"\n err_handler.log_msg(config_options, mpi_config)\n try:\n var_tmp = id_tmp.variables[supplemental_precip.netcdf_var_names[-1]][0,:,:]\n except (ValueError, KeyError, AttributeError) as err:\n config_options.errMsg = \"Unable to extract precipitation from STAGE IV file: \" + \\\n supplemental_precip.file_in1 + \" (\" + str(err) + \")\"\n err_handler.log_critical(config_options, mpi_config)\n err_handler.check_program_status(config_options, mpi_config)\n\n var_sub_tmp = mpi_config.scatter_array(supplemental_precip, var_tmp, config_options)\n err_handler.check_program_status(config_options, mpi_config)\n\n try:\n supplemental_precip.esmf_field_in.data[:, :] = var_sub_tmp\n except (ValueError, KeyError, AttributeError) as err:\n config_options.errMsg = \"Unable to place STAGE IV precipitation into local ESMF field: \" + str(err)\n err_handler.log_critical(config_options, mpi_config)\n err_handler.check_program_status(config_options, mpi_config)\n\n try:\n supplemental_precip.esmf_field_out = supplemental_precip.regridObj(supplemental_precip.esmf_field_in,\n supplemental_precip.esmf_field_out)\n except ValueError as ve:\n config_options.errMsg = \"Unable to regrid STAGE IV supplemental precipitation: \" + str(ve)\n err_handler.log_critical(config_options, mpi_config)\n err_handler.check_program_status(config_options, mpi_config)\n\n # Set any pixel cells outside the input domain to the global missing value.\n try:\n 
supplemental_precip.esmf_field_out.data[np.where(supplemental_precip.regridded_mask == 0)] = \\\n config_options.globalNdv\n except (ValueError, ArithmeticError) as npe:\n config_options.errMsg = \"Unable to run mask search on STAGE IV supplemental precipitation: \" + str(npe)\n err_handler.log_critical(config_options, mpi_config)\n err_handler.check_program_status(config_options, mpi_config)\n\n supplemental_precip.regridded_precip2[:, :] = supplemental_precip.esmf_field_out.data\n err_handler.check_program_status(config_options, mpi_config)\n\n # Convert the 6-hourly precipitation total to a rate of mm/s\n try:\n ind_valid = np.where(supplemental_precip.regridded_precip2 != config_options.globalNdv)\n supplemental_precip.regridded_precip2[ind_valid] = supplemental_precip.regridded_precip2[ind_valid] / 3600.0\n del ind_valid\n except (ValueError, ArithmeticError, AttributeError, KeyError) as npe:\n config_options.errMsg = \"Unable to run NDV search on STAGE IV supplemental precipitation: \" + str(npe)\n err_handler.log_critical(config_options, mpi_config)\n err_handler.check_program_status(config_options, mpi_config)\n\n # If we are on the first timestep, set the previous regridded field to be\n # the latest as there are no states for time 0.\n if config_options.current_output_step == 1:\n supplemental_precip.regridded_precip1[:, :] = \\\n supplemental_precip.regridded_precip2[:, :]\n err_handler.check_program_status(config_options, mpi_config)\n\n # Close the temporary NetCDF file and remove it.\n if mpi_config.rank == 0:\n try:\n id_tmp.close()\n except OSError:\n config_options.errMsg = \"Unable to close NetCDF file: \" + stage4_tmp_nc\n err_handler.log_critical(config_options, mpi_config)\n try:\n os.remove(stage4_tmp_nc)\n except OSError:\n config_options.errMsg = \"Unable to remove NetCDF file: \" + stage4_tmp_nc\n err_handler.log_critical(config_options, mpi_config)\n err_handler.check_program_status(config_options, mpi_config)", "def test_migrate_on_compute_fail(self):\n server, source_host, target_host = self._create_server()\n\n # Wrap _prep_resize so we can concurrently delete the server.\n original_prep_resize = compute_manager.ComputeManager._prep_resize\n\n def wrap_prep_resize(*args, **kwargs):\n self._delete_server(server)\n return original_prep_resize(*args, **kwargs)\n\n self.stub_out('nova.compute.manager.ComputeManager._prep_resize',\n wrap_prep_resize)\n\n # Now start the cold migration which will fail in the dest compute.\n self.api.post_server_action(server['id'], {'migrate': None})\n # We cannot monitor the migration from the API since it is deleted\n # when the instance is deleted so just wait for the failed instance\n # action event after the allocation revert happens.\n self._wait_for_action_fail_completion(\n server, instance_actions.MIGRATE, 'compute_prep_resize')\n self._assert_no_allocations(server)", "def tear_down_mongo(self):\r\n split_db = self.split_mongo.db\r\n # old_mongo doesn't give a db attr, but all of the dbs are the same\r\n split_db.drop_collection(self.old_mongo.collection)", "def test_update_virtualization_realm(self):\n pass", "def __init__(self, source='10.0.2.32', is_local=False):\n super().__init__(source, is_local)\n self.client = MongoClient(source)", "def _mongodump(self, port, errors):\n ret = run(\"mongodump %s --forceTableScan --host %s:%d -o %s/%s\" % (\"--oplog\" if self.name != \"config\" and self.name != \"mongos\" and not self.can_restart else \"\", self.host, port, self.backup_path, self.name))\n if ret != 0:\n 
errors.put(Exception(\"Error dumping %s server\" % self.name))\n traceback.print_exc()\n return\n\n ret = run(\"cd %s && tar zcvf %s.tar.gz %s && rm -rf %s\" % (self.backup_path, self.name, self.name, self.name))\n if ret != 0:\n errors.put(Exception(\"Error zipping %s server backup\" % self.name))\n traceback.print_exc()", "def ping(context):\n\n ssh_config = aws_infrastructure.tasks.ssh.SSHConfig.load(SSH_CONFIG_PATH)\n documentdb_config = aws_infrastructure.tasks.library.documentdb.DocumentDBConfig.load(DOCUMENTDB_CONFIG_PATH)\n\n with aws_infrastructure.tasks.ssh.SSHClientContextManager(\n ssh_config=ssh_config,\n ) as ssh_client:\n with aws_infrastructure.tasks.ssh.SSHPortForwardContextManager(\n ssh_client=ssh_client,\n remote_host=documentdb_config.endpoint,\n remote_port=documentdb_config.port,\n ) as ssh_port_forward:\n client = MongoClient(\n host=[\n 'localhost'\n ],\n port=ssh_port_forward.local_port,\n connect=True,\n username=documentdb_config.admin_user,\n password=documentdb_config.admin_password,\n tls=True,\n tlsInsecure=True,\n )\n\n print(client.admin.command('ping'))", "def test_11_migrate_vm_live_attach_disk_on_remote(self):\n \n global vm2\n global data_disk_2\n data_disk_2 = self.helper.create_custom_disk(\n self.apiclient,\n {\"diskname\":\"StorPoolDisk\" },\n zoneid=self.zone.id,\n size = 5,\n miniops = 2000,\n maxiops = 5000,\n account=self.account.name,\n domainid=self.account.domainid,\n diskofferingid=self.disk_offerings.id,\n )\n\n self.debug(\"Created volume with ID: %s\" % data_disk_2.id)\n\n self.virtual_machine_live_migration_2.attach_volume(\n self.apiclient,\n data_disk_2\n )\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm2 = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_2, destinationHost)\n\n\n self.virtual_machine_live_migration_2.attach_volume(\n self.apiclient,\n self.volume_2\n )\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm2 = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_2, destinationHost)", "def test_port_update_after_vm_migration(self):\n # Create security group for the server\n group_create_body_update, _ = self._create_security_group()\n\n # Create server with security group\n name = data_utils.rand_name('server-with-security-group')\n server_id = self._create_server_with_sec_group(\n name, self.network['id'],\n group_create_body_update['security_group']['id'])\n self.assertTrue(self.verify_portgroup(self.network['id'], server_id))\n device_port = self.ports_client.list_ports(device_id=server_id)\n port_id = device_port['ports'][0]['id']\n floating_ip = self._associate_floating_ips(port_id=port_id)\n self.assertTrue(self.ping_ip_address(\n floating_ip['floatingip']['floating_ip_address'],\n should_succeed=False))\n cluster = cfg.CONF.VCENTER.cluster_in_use\n content = self._create_connection()\n host_dic = self._get_host_name(server_id)\n vm_host = host_dic['host_name']\n vm_host_ip = vm_host.name\n cluster_hosts = self._get_hosts_for_cluster(content, cluster)\n if len(cluster_hosts.host) < 2:\n msg = \"Min two hosts needed in cluster for Vmotion\"\n raise testtools.TestCase.skipException(msg)\n for host in cluster_hosts.host:\n if host.name != vm_host_ip:\n dest_host = host\n # Live Migration\n 
task = self._migrate_vm(content, server_id, dest_host)\n self._wait_for_task(task, content)\n # Update security group rule for the existing security group\n self.security_group_rules_client.create_security_group_rule(\n security_group_id=group_create_body_update['security_group']['id'],\n protocol='icmp',\n direction='ingress',\n ethertype=self.ethertype\n )\n self.assertTrue(self.ping_ip_address(\n floating_ip['floatingip']['floating_ip_address'],\n should_succeed=True))", "def _requires_inmigrate_from(self):\n existing = locate_live_service(self.consul, \"qemu-\" + self.name)\n\n if existing and existing[\"Address\"] != self.this_host:\n # Consul knows about a running VM. Lets try a migration.\n return existing[\"Address\"]\n\n if self.ceph.is_unlocked():\n # Consul doesn't know about a running VM and no volume is locked.\n # It doesn't make sense to live migrate this VM.\n return None\n\n if self.ceph.locked_by_me():\n # Consul doesn't know about a running VM and the volume is\n # locked by me, so it doesn't make sense to live migrate the VM.\n return None\n\n # The VM seems to be locked somewhere else, try to migrate it from\n # there.\n return self.ceph.locked_by()", "def _post_task_update_advertise_address():\n default_network_interface = None\n\n with open(KUBE_APISERVER_CONFIG) as f:\n lines = f.read()\n m = re.search(REGEXPR_ADVERTISE_ADDRESS, lines)\n if m:\n default_network_interface = m.group(1)\n LOG.debug(' default_network_interface = %s', default_network_interface)\n\n if advertise_address and default_network_interface \\\n and advertise_address != default_network_interface:\n cmd = [\"sed\", \"-i\", \"/oidc-issuer-url/! s/{}/{}/g\".format(default_network_interface, advertise_address),\n KUBE_APISERVER_CONFIG]\n _ = _exec_cmd(cmd)", "def nfvi_live_migrate_instance(instance_uuid, callback, to_host_name=None,\n block_storage_migration='auto', context=None):\n if context is None:\n cmd_id = _compute_plugin.invoke_plugin_expediate(\n 'live_migrate_instance', instance_uuid, to_host_name,\n block_storage_migration, context, callback=callback)\n else:\n cmd_id = _compute_plugin.invoke_plugin(\n 'live_migrate_instance', instance_uuid, to_host_name,\n block_storage_migration, context, callback=callback)\n return cmd_id", "def test_vm_migration(self):\n self.check_vm_host_after_migration(positive=False)", "def test_vm_migration(self):\n self.check_vm_host_after_migration(positive=False)", "def cleanUp(name):\n clovr = pymongo.Connection().clovr\n clovr.clusters.remove(dict(name=name))", "def OSSupportsIPv4(self) -> bool:", "def prepare_replica_for_exchange(self, replica):\n pass", "def test_deploy_instance_with_network_and_associate_public_ip(self):\n\n # skip test if suite couldn't start from an empty, clean list of allocated IPs (to avoid cascading failures)\n if self.suite_world['allocated_ips']:\n self.skipTest(\"There were pre-existing, not deallocated IPs\")\n\n # Allocate IP\n allocated_ip = self.__allocate_ip_test_helper__()\n\n # Create Router with an external network gateway\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n router_name = TEST_ROUTER_PREFIX + \"_public_ip_\" + suffix\n external_network_id = self.__get_external_network_test_helper__()\n router_id = self.__create_router_test_helper__(router_name, external_network_id)\n\n # Create Network\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 247\n network_id, subnet_id = self.__create_network_and_subnet_test_helper__(network_name, network_cidr)\n\n # Add interface to 
router\n port_id = self.neutron_operations.add_interface_router(router_id, subnet_id)\n self.test_world['ports'].append(port_id)\n\n # Deploy VM (it will have only one IP from the Public Pool)\n instance_name = TEST_SERVER_PREFIX + \"_public_ip_\" + suffix\n server_id = self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name, is_network_new=False)\n\n # Associate Public IP to Server\n self.nova_operations.add_floating_ip_to_instance(server_id=server_id, ip_address=allocated_ip)", "def database_connectivity():\n\n port = \"mongodb://localhost:27017/\"\n client = pymongo.MongoClient(host=port)\n\n db = client[\"Password_Manager\"]\n collection = db[\"Passwords\"]\n\n return collection", "def prepare_maintenance(self, errors):\n self.cmd_line_opts = self.client['admin'].command('getCmdLineOpts')\n\n if not self.can_restart:\n return\n port = 27017\n specified = False\n repl_index = None\n new_cmd_line = self.cmd_line_opts['argv'][:]\n for i in range(len(new_cmd_line)):\n if new_cmd_line[i] == '--port':\n logging.info(str(new_cmd_line))\n self.maintenance_port = int(new_cmd_line[i+1]) + 20000\n new_cmd_line[i+1] = str(self.maintenance_port)\n specified = True\n if new_cmd_line[i] == '--replSet':\n repl_index = i\n if not specified:\n new_cmd_line.append('--port')\n new_cmd_line.append('47017')\n if repl_index is not None:\n del new_cmd_line[repl_index+1]\n del new_cmd_line[repl_index]\n try:\n self._shutdown()\n except Exception as e:\n errors.put(e)\n traceback.print_exc()\n return\n run(\" \".join(new_cmd_line))\n self.client = pymongo.MongoClient(self.host, self.maintenance_port)", "def open_db_connection():\n client = MongoClient() #'104.131.185.191', 27017\n db = client[\"225VOH\"]\n return client, db", "def upgrade_to_8():\n\n colls = config.db.collection_names()\n to_be_removed = ['version', 'config', 'static']\n # If we are in a bad state (singletons exists but so do any of the colls in to be removed)\n # remove singletons to try again\n if 'singletons' in colls and set(to_be_removed).intersection(set(colls)):\n config.db.drop_collection('singletons')\n\n if 'singletons' not in config.db.collection_names():\n static = config.db.static.find({})\n if static.count() > 0:\n config.db.singletons.insert_many(static)\n config.db.singletons.insert(config.db.version.find({}))\n\n configs = config.db.config.find({'latest': True},{'latest':0})\n if configs.count() == 1:\n c = configs[0]\n c['_id'] = 'config'\n config.db.singletons.insert_one(c)\n\n for c in to_be_removed:\n if c in config.db.collection_names():\n config.db.drop_collection(c)", "def test_replace_host_subnet(self):\n pass", "def test_vm_migration(self):\n self.check_vm_host_after_migration(positive=True)", "def test_vm_migration(self):\n self.check_vm_host_after_migration(positive=True)", "def migrate_volume(self, ctxt, volume, host):\n LOG.debug('Enter: migrate_volume: id=%(id)s, host=%(host)s',\n {'id': volume['id'], 'host': host})\n\n false_ret = (False, None)\n\n if volume['status'] not in ('available', 'retyping'):\n LOG.warning(\"Volume status must be 'available' or 'retyping'.\"\n \" Current volume status: %s\", volume['status'])\n return false_ret\n\n if 'capabilities' not in host:\n LOG.warning(\"Unsupported host. 
No capabilities found\")\n return false_ret\n\n capabilities = host['capabilities']\n ns_shares = capabilities['ns_shares']\n dst_parts = capabilities['location_info'].split(':')\n dst_host, dst_volume = dst_parts[1:]\n\n if (capabilities.get('vendor_name') != 'Nexenta' or\n dst_parts[0] != self.__class__.__name__ or\n capabilities['free_capacity_gb'] < volume['size']):\n return false_ret\n\n nms = self.share2nms[volume['provider_location']]\n ssh_bindings = nms.appliance.ssh_list_bindings()\n shares = []\n for bind in ssh_bindings:\n for share in ns_shares:\n if (share.startswith(ssh_bindings[bind][3]) and\n ns_shares[share] >= volume['size']):\n shares.append(share)\n if len(shares) == 0:\n LOG.warning(\"Remote NexentaStor appliance at %s should be \"\n \"SSH-bound.\", share)\n return false_ret\n share = sorted(shares, key=ns_shares.get, reverse=True)[0]\n snapshot = {\n 'volume_name': volume['name'],\n 'volume_id': volume['id'],\n 'name': utils.get_migrate_snapshot_name(volume)\n }\n self.create_snapshot(snapshot)\n location = volume['provider_location']\n src = '%(share)s/%(volume)s@%(snapshot)s' % {\n 'share': location.split(':')[1].split('volumes/')[1],\n 'volume': volume['name'],\n 'snapshot': snapshot['name']\n }\n dst = ':'.join([dst_host, dst_volume.split('/volumes/')[1]])\n try:\n nms.appliance.execute(self._get_zfs_send_recv_cmd(src, dst))\n except utils.NexentaException as exc:\n LOG.warning(\"Cannot send source snapshot %(src)s to \"\n \"destination %(dst)s. Reason: %(exc)s\",\n {'src': src, 'dst': dst, 'exc': exc})\n return false_ret\n finally:\n try:\n self.delete_snapshot(snapshot)\n except utils.NexentaException as exc:\n LOG.warning(\"Cannot delete temporary source snapshot \"\n \"%(src)s on NexentaStor Appliance: %(exc)s\",\n {'src': src, 'exc': exc})\n try:\n self.delete_volume(volume)\n except utils.NexentaException as exc:\n LOG.warning(\"Cannot delete source volume %(volume)s on \"\n \"NexentaStor Appliance: %(exc)s\",\n {'volume': volume['name'], 'exc': exc})\n\n dst_nms = self._get_nms_for_url(capabilities['nms_url'])\n dst_snapshot = '%s/%s@%s' % (dst_volume.split('volumes/')[1],\n volume['name'], snapshot['name'])\n try:\n dst_nms.snapshot.destroy(dst_snapshot, '')\n except utils.NexentaException as exc:\n LOG.warning(\"Cannot delete temporary destination snapshot \"\n \"%(dst)s on NexentaStor Appliance: %(exc)s\",\n {'dst': dst_snapshot, 'exc': exc})\n return True, {'provider_location': share}", "def swap_volume(self, old_connection_info, new_connection_info,\n instance, mountpoint, resize_to):", "def _set_static_ip(name, session, vm_):\n ipv4_cidr = \"\"\n ipv4_gw = \"\"\n if \"ipv4_gw\" in vm_.keys():\n log.debug(\"ipv4_gw is found in keys\")\n ipv4_gw = vm_[\"ipv4_gw\"]\n if \"ipv4_cidr\" in vm_.keys():\n log.debug(\"ipv4_cidr is found in keys\")\n ipv4_cidr = vm_[\"ipv4_cidr\"]\n log.debug(\"attempting to set IP in instance\")\n set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)", "def upgrade():\n op.add_column(\n 'share_instances',\n Column('access_rules_status', String(length=255))\n )\n\n connection = op.get_bind()\n share_instances_table = utils.load_table('share_instances', connection)\n instance_access_table = utils.load_table('share_instance_access_map',\n connection)\n\n # NOTE(u_glide): Data migrations shouldn't be performed on live clouds\n # because it will lead to unpredictable behaviour of running operations\n # like migration.\n instances_query = (\n share_instances_table.select()\n .where(share_instances_table.c.status == 
constants.STATUS_AVAILABLE)\n .where(share_instances_table.c.deleted == 'False')\n )\n\n for instance in connection.execute(instances_query):\n\n access_mappings_query = instance_access_table.select().where(\n instance_access_table.c.share_instance_id == instance['id']\n ).where(instance_access_table.c.deleted == 'False')\n\n status = constants.STATUS_ACTIVE\n\n for access_rule in connection.execute(access_mappings_query):\n\n if (access_rule['state'] == constants.STATUS_DELETING or\n access_rule['state'] not in priorities):\n continue\n\n if priorities[access_rule['state']] > priorities[status]:\n status = access_rule['state']\n\n # pylint: disable=no-value-for-parameter\n op.execute(\n share_instances_table.update().where(\n share_instances_table.c.id == instance['id']\n ).values({'access_rules_status': upgrade_data_mapping[status]})\n )\n\n op.drop_column('share_instance_access_map', 'state')", "def test_migrate_volume(self, volume, volumes_steps):\n old_host, _ = volumes_steps.migrate_volume(volume.name)\n volumes_steps.migrate_volume(volume.name, old_host)", "def disassociate_elastic_ip(ElasticIp=None):\n pass", "def upgrade():\n\n conn = op.get_bind()\n invalid_acr = get_invalid_acrs(conn, models_names)\n\n if invalid_acr:\n invalid_acr_ids = [x.id for x in invalid_acr]\n add_to_objects_without_revisions_bulk(conn,\n invalid_acr_ids,\n acr,\n \"deleted\")\n delete_invalid_acr(conn, models_names)", "def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False):\n return", "def init_nova_network_ips(instance, server):\n ctx = context.ctx()\n\n management_ip = instance.management_ip\n internal_ip = instance.internal_ip\n\n for network_label in server.networks:\n nova_network = nova.client().networks.find(label=network_label)\n network = netaddr.IPNetwork(nova_network.cidr)\n for ip in server.networks[network_label]:\n if netaddr.IPAddress(ip) in network:\n internal_ip = instance.internal_ip or ip\n else:\n management_ip = instance.management_ip or ip\n\n if not CONF.use_floating_ips:\n management_ip = internal_ip\n\n conductor.instance_update(ctx, instance, {\"management_ip\": management_ip,\n \"internal_ip\": internal_ip})\n\n return internal_ip and management_ip", "def clone_collection(self, src_mongodb_uri, src_database, src_collection):\n # drop \"mongodb://\" suffix from uri\n src_conn = src_mongodb_uri[10:]\n if src_conn[-1] == \"/\":\n src_conn = src_conn[:-1]\n self.client.admin.command(\n {\"cloneCollection\": src_database + \".\" + src_collection, \"from\": src_conn}\n )", "def post_instance_ip_create(self, resource_dict):\n pass", "def zone_map(zoneadm, passthru='__all__'):\n\n ret = {}\n\n for z in zoneadm:\n chunks = z.split(':')\n\n if len(chunks) < 8:\n raise NotImplementedError(\n 'cannot parse zoneadm output: %d fields in %s' %\n (len(chunks), zoneadm))\n\n if chunks[0] == '-':\n continue\n\n if passthru == '__all__' or chunks[1] in passthru:\n ret[chunks[0]] = chunks[1]\n\n \"\"\"\n Here's a cheat: if we're in an NGZ, we don't actually care about\n the zone ID. In fact, kstat `link` instances *don't* match to\n zone ID in NGZs. 
So, we fudge the key.\n \"\"\"\n\n if len(zoneadm) == 1 and ret.keys()[0] != 'global':\n ret = {'0': ret.values()[0]}\n\n return ret", "def test_add_autoassigned_ipv4(self):\n with DockerHost('host', dind=False) as host:\n # Test that auto-assiging IPv4 addresses gives what we expect\n workloads = self._setup_env(host, count=2, ip=\"ipv4\")\n\n workloads[0].assert_can_ping(\"192.168.0.1\", retries=3)\n workloads[1].assert_can_ping(\"192.168.0.0\", retries=3)\n\n host.calicoctl(\"container remove {0}\".format(\"workload0\"))\n host.calicoctl(\"container remove {0}\".format(\"workload1\"))\n\n host.remove_workloads()\n\n # Test that recreating returns the next two IPs (IPs are not\n # reassigned automatically unless we have run out of IPs).\n workloads = self._setup_env(host, count=2, ip=\"ipv4\")\n\n workloads[0].assert_can_ping(\"192.168.0.3\", retries=3)\n workloads[1].assert_can_ping(\"192.168.0.2\", retries=3)", "def migrateLXCContainer(self,node,vmid,target):\n post_data = {'target': str(target)}\n data = self.connect('post','nodes/%s/lxc/%s/migrate' % (node,vmid), post_data)\n return data", "def migrate(env, dry_run=False):\n registry = env['registry']\n settings = registry.settings\n readonly_backends = ('storage', 'permission')\n readonly_mode = asbool(settings.get('readonly', False))\n\n for backend in ('cache', 'storage', 'permission'):\n if hasattr(registry, backend):\n if readonly_mode and backend in readonly_backends:\n message = ('Cannot migrate the %s backend while '\n 'in readonly mode.' % backend)\n logger.error(message)\n else:\n getattr(registry, backend).initialize_schema(dry_run=dry_run)", "def mongodb_drop():\n # Load environment variables\n dotenv_path = find_dotenv()\n load_dotenv(dotenv_path)\n # Connect to the db\n # DB will be created if it doesn't already exist\n client = pymongo.MongoClient(os.environ.get(\"DATABASE_URL\"), 27017)\n client = client.drop_database(\"tweetbase\")", "def save_list_mongo(listz):\t\n\tconnection = pymongo.Connection('localhost', 27017)\n\tdb = connection.database\n\tcollection = db.warez_collection", "def testMigrateAnInstanceInAnInstanceGroup(self):\n ### create test resources\n instance_name_1 = 'end-to-end-test-instance-1'\n instance_selfLink_1 = \\\n self.test_resource_creator.create_instance_using_template(\n instance_name_1,\n self.test_resource_creator.legacy_instance_template_selfLink)[\n 'targetLink']\n original_config = self.google_api_interface.get_instance_configs(\n instance_name_1)\n unmanaged_instance_group_name = 'end-to-end-test-unmanaged-instance-group-1'\n self.test_resource_creator.create_unmanaged_instance_group(\n unmanaged_instance_group_name,\n [instance_name_1])\n ### start migration\n selfLink_executor = SelfLinkExecutor(self.compute,\n instance_selfLink_1,\n self.test_resource_creator.network_name,\n self.test_resource_creator.subnetwork_name,\n False)\n\n with self.assertRaises(AmbiguousTargetResource):\n migration_handler = selfLink_executor.build_migration_handler()\n migration_handler.network_migration()\n ### check migration result\n # the migration never starts, so the config didn't change\n new_config = self.google_api_interface.get_instance_configs(\n instance_name_1)\n self.assertEqual(new_config, original_config)\n print('Pass the current test')", "def nocleanup():\n _lbs = kivaloo.servers.Server_lbs()\n _kvlds = kivaloo.servers.Server_kvlds()", "def __init__(self, db_name='leaderboard'):\n key = os.getenv('ATLAS_KEY')\n self.valid = key is not None\n self.client = None\n self.database = None\n if 
self.valid:\n try:\n self.client = pymongo.MongoClient(key % db_name)\n self.database = self.client[db_name]\n except pymongo.errors.ConfigurationError:\n self.valid = False", "def default_zone(self, region):\n if region == 'us-east-1':\n return region + 'b'\n else:\n return region + 'a'" ]
[ "0.52417934", "0.50473094", "0.5008553", "0.49998647", "0.48307848", "0.47682485", "0.47216007", "0.4666874", "0.46228546", "0.45956224", "0.4590136", "0.45401716", "0.45268676", "0.4509593", "0.45067737", "0.4491239", "0.44909394", "0.4489098", "0.44764355", "0.44562212", "0.44552153", "0.4452588", "0.44209155", "0.4413766", "0.44047692", "0.44022316", "0.43876037", "0.43639293", "0.43613485", "0.43509868", "0.4350281", "0.43180835", "0.43142134", "0.43064004", "0.43009833", "0.42890292", "0.4267697", "0.42645198", "0.4257341", "0.42567968", "0.42564234", "0.42538708", "0.42514712", "0.42497528", "0.4248357", "0.4247581", "0.42430687", "0.42307425", "0.4230253", "0.42295033", "0.42262346", "0.42239565", "0.42197517", "0.42053407", "0.42049512", "0.4203045", "0.42021617", "0.42013466", "0.4198466", "0.41966215", "0.41961485", "0.41927412", "0.41557997", "0.41503388", "0.4149511", "0.41447994", "0.41431043", "0.41431043", "0.41413295", "0.4138478", "0.41375104", "0.4135161", "0.41278815", "0.4126963", "0.41255993", "0.4123824", "0.4123794", "0.41236663", "0.41236663", "0.4123581", "0.41211066", "0.41202372", "0.41177627", "0.4099029", "0.40968773", "0.4092736", "0.40895063", "0.4089035", "0.40839347", "0.40738735", "0.40738624", "0.40725204", "0.40713844", "0.40702423", "0.40662047", "0.40658936", "0.40632343", "0.40607414", "0.4058552", "0.40555742" ]
0.5335884
0
This operation is available only for replica set instances that run MongoDB 4.2 or earlier and for sharded cluster instances. Before you call this operation, make sure that the following requirements are met: If you have applied for a public endpoint for the ApsaraDB for MongoDB instance, you must call the [ReleasePublicNetworkAddress](~~67604~~) operation to release the public endpoint before you call the MigrateAvailableZone operation. Transparent data encryption (TDE) is disabled for the ApsaraDB for MongoDB instance. The source zone and the destination zone belong to the same region. A vSwitch is created in the destination zone; this prerequisite must be met if the instance resides in a virtual private cloud (VPC). For more information about how to create a vSwitch, see [Work with vSwitches](~~65387~~).
async def migrate_available_zone_with_options_async(
    self,
    request: dds_20151201_models.MigrateAvailableZoneRequest,
    runtime: util_models.RuntimeOptions,
) -> dds_20151201_models.MigrateAvailableZoneResponse:
    UtilClient.validate_model(request)
    query = {}
    if not UtilClient.is_unset(request.dbinstance_id):
        query['DBInstanceId'] = request.dbinstance_id
    if not UtilClient.is_unset(request.effective_time):
        query['EffectiveTime'] = request.effective_time
    if not UtilClient.is_unset(request.owner_account):
        query['OwnerAccount'] = request.owner_account
    if not UtilClient.is_unset(request.owner_id):
        query['OwnerId'] = request.owner_id
    if not UtilClient.is_unset(request.resource_owner_account):
        query['ResourceOwnerAccount'] = request.resource_owner_account
    if not UtilClient.is_unset(request.resource_owner_id):
        query['ResourceOwnerId'] = request.resource_owner_id
    if not UtilClient.is_unset(request.vswitch):
        query['Vswitch'] = request.vswitch
    if not UtilClient.is_unset(request.zone_id):
        query['ZoneId'] = request.zone_id
    req = open_api_models.OpenApiRequest(
        query=OpenApiUtilClient.query(query)
    )
    params = open_api_models.Params(
        action='MigrateAvailableZone',
        version='2015-12-01',
        protocol='HTTPS',
        pathname='/',
        method='POST',
        auth_type='AK',
        style='RPC',
        req_body_type='formData',
        body_type='json'
    )
    return TeaCore.from_map(
        dds_20151201_models.MigrateAvailableZoneResponse(),
        await self.call_api_async(params, req, runtime)
    )
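A minimal calling sketch for the method above, assuming the generated async Client class exposed by this SDK, conventional package paths, and placeholder credentials, endpoint, instance ID, zone ID, and vSwitch ID (all of which are assumptions); the request fields used (dbinstance_id, zone_id, vswitch) come directly from the method above, and RuntimeOptions is the same helper the method accepts.

import asyncio

from alibabacloud_dds20151201.client import Client  # assumed package path for the generated client
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util import models as util_models


async def main():
    # Placeholder credentials and endpoint; replace with real values for your account and region.
    config = open_api_models.Config(
        access_key_id='<access-key-id>',
        access_key_secret='<access-key-secret>',
        endpoint='mongodb.aliyuncs.com',  # assumed endpoint; varies by region
    )
    client = Client(config)

    # DBInstanceId and ZoneId identify the instance and the destination zone;
    # Vswitch is needed when the destination zone is inside a VPC (see the prerequisites above).
    # The IDs below are illustrative placeholders.
    request = dds_20151201_models.MigrateAvailableZoneRequest(
        dbinstance_id='dds-bp1xxxxxxxxxxxxx',
        zone_id='cn-hangzhou-h',
        vswitch='vsw-bp1xxxxxxxxxxxxx',
    )
    runtime = util_models.RuntimeOptions()
    response = await client.migrate_available_zone_with_options_async(request, runtime)
    print(response.body)


asyncio.run(main())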
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def migrate_available_zone_with_options(\n self,\n request: dds_20151201_models.MigrateAvailableZoneRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.MigrateAvailableZoneResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.effective_time):\n query['EffectiveTime'] = request.effective_time\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.vswitch):\n query['Vswitch'] = request.vswitch\n if not UtilClient.is_unset(request.zone_id):\n query['ZoneId'] = request.zone_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='MigrateAvailableZone',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.MigrateAvailableZoneResponse(),\n self.call_api(params, req, runtime)\n )", "def migrate_available_zone(\n self,\n request: dds_20151201_models.MigrateAvailableZoneRequest,\n ) -> dds_20151201_models.MigrateAvailableZoneResponse:\n runtime = util_models.RuntimeOptions()\n return self.migrate_available_zone_with_options(request, runtime)", "def test_migrate_volume_driver_cross_az(self):\n # Mock driver and rpc functions\n self.mock_object(self.volume.driver, 'migrate_volume',\n lambda x, y, z, new_type_id=None: (\n True, {'user_id': fake.USER_ID}))\n dst_az = 'AZ2'\n db.service_update(self.context, self._service.id,\n {'availability_zone': dst_az})\n\n volume = tests_utils.create_volume(self.context, size=0,\n host=CONF.host,\n migration_status='migrating')\n host_obj = {'host': 'newhost', 'capabilities': {}}\n self.volume.migrate_volume(self.context, volume, host_obj, False)\n\n # check volume properties\n volume.refresh()\n self.assertEqual('newhost', volume.host)\n self.assertEqual('success', volume.migration_status)\n self.assertEqual(dst_az, volume.availability_zone)", "def install_mongo ( vpc_conn, ec2_conn, cloudwatch_conn, vpc, base_name, aws_account_type, base_topicarn, params ):\n app_type = 'MONGO'\n mongo_keypair = get_keypair_name( aws_account_type, vpc.region.name, app_type )\n print mongo_keypair\n mongo_sec_grp_name = get_secgrp_name( base_name, app_type )\n \n print \"mongo_sec_grp_name\" + mongo_sec_grp_name\n \n mongo_sec_grp = find_secgrp(ec2_conn, mongo_sec_grp_name)\n print mongo_sec_grp\n \n if not mongo_sec_grp :\n mongo_sec_grp = create_secgrp( ec2_conn,\n vpc,\n mongo_sec_grp_name,\n 'Controls access to the ' + app_type )\n print mongo_sec_grp \n \n remove_all_rules( ec2_conn, [ mongo_sec_grp ],deep = True,base_name = base_name)\n grant_ssh_access( ec2_conn, [ mongo_sec_grp ], find_group( ec2_conn, base_name, 'NAT' ) )\n grant_grp_self_access ( ec2_conn, mongo_sec_grp, 0, 65535, protocol = 'tcp' )\n \n mongo_ami_name = base_name + '-' + app_type\n \n mongo_ami = get_ami_by_name( ec2_conn, mongo_ami_name )\n if not mongo_ami :\n mongo_ami = get_ami_by_name( ec2_conn, 
default_mongo_ami )\n\n print \"Creating mongoDB Instances\"\n \n mongo_subnets = get_vpc_subnets( vpc_conn, vpc, 'STATIC' )\n mongo_sec_grps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ mongo_sec_grp_name ] } )\n \n primary_ip = get_primary_node(mongo_subnets)\n mongo_instances = []\n mongo_config = get_mongo_config(mongo_subnets)\n \n user_data = None\n primary_instance_id = None\n primary_subnet = None\n for subnet in mongo_subnets :\n cidr_block = subnet.cidr_block\n \n if cidr_block == primary_ip :\n primary_subnet = subnet\n \n # First launch the secondary instances\n if cidr_block != primary_ip :\n instance_private_ip = get_static_ip(subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo )\n\n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )\n\n user_data = base64.b64encode(mongo_config)\n \n print \"Launching primary mongo instance\"\n print \"primary_subnet \" +primary_subnet.cidr_block\n instance_private_ip = get_static_ip(primary_subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = primary_subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo)\n \n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )", "def create_mongodb(config):\n\n \n mongo_url = \"mongodb://\"\n mongo_url += \",\".join(map(lambda srv: srv['host'] + \":\" + str(srv['port']), config['data']['mongoServers']))\n \n if 'replica' in config['data']:\n mongo_url += \"/?replicaSet={0}\".format(config['data']['replica'])\n\n client = MongoClient(mongo_url)\n\n return client", "async def migrate_available_zone_async(\n self,\n request: dds_20151201_models.MigrateAvailableZoneRequest,\n ) -> dds_20151201_models.MigrateAvailableZoneResponse:\n runtime = util_models.RuntimeOptions()\n return await self.migrate_available_zone_with_options_async(request, runtime)", "def change_zone_ip(config, section, new_ip):\n\n a_name = config.get(section, \"a_name\")\n apikey = config.get(section, \"apikey\")\n ttl = int(config.get(section, \"ttl\"))\n zone_id = get_zone_id(config, section)\n\n zone_record = {'name': a_name, 'value': new_ip, 'ttl': ttl, 'type': 'A'}\n\n new_zone_ver = api.domain.zone.version.new(apikey, zone_id)\n\n # clear old A record (defaults to previous verison's\n api.domain.zone.record.delete(apikey, zone_id, new_zone_ver,\n {'type': 'A', 'name': a_name})\n\n # Add in new A record\n api.domain.zone.record.add(apikey, zone_id, new_zone_ver, zone_record)\n\n # Set new zone version as the active zone\n api.domain.zone.version.set(apikey, zone_id, new_zone_ver)", "def configure_cluster(ctx, zone, db_instance_name):\n 
ctx.run(init_pg_servers_play_run(zone, db_instance_name), pty=True, echo=True)", "def test_mongodb_destination(sdc_builder, sdc_executor, mongodb):\n pipeline_builder = sdc_builder.get_pipeline_builder()\n pipeline_builder.add_error_stage('Discard')\n\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='TEXT', raw_data='\\n'.join(DATA))\n\n expression_evaluator = pipeline_builder.add_stage('Expression Evaluator')\n # MongoDB destination uses the CRUD operation in the sdc.operation.type record header attribute when writing\n # to MongoDB. Value 4 specified below is for UPSERT.\n expression_evaluator.header_attribute_expressions = [{'attributeToSet': 'sdc.operation.type',\n 'headerAttributeExpression': '1'}]\n\n mongodb_dest = pipeline_builder.add_stage('MongoDB', type='destination')\n mongodb_dest.set_attributes(database=get_random_string(ascii_letters, 5),\n collection=get_random_string(ascii_letters, 10))\n # From 3.6.0, unique key field is a list, otherwise single string for older version.\n mongodb_dest.unique_key_field = ['/text'] if Version(sdc_builder.version) >= Version('3.6.0') else '/text'\n\n record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')\n trash = pipeline_builder.add_stage('Trash')\n dev_raw_data_source >> record_deduplicator >> expression_evaluator >> mongodb_dest\n record_deduplicator >> trash\n pipeline = pipeline_builder.build().configure_for_environment(mongodb)\n\n try:\n # Data is generated in dev_raw_data_source and sent to MongoDB using pipeline.\n sdc_executor.add_pipeline(pipeline)\n sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(DATA))\n sdc_executor.stop_pipeline(pipeline)\n\n # Verify data is received correctly using PyMongo.\n # Similar to writing, while reading data, we specify MongoDB database and the collection inside it.\n logger.info('Verifying docs received with PyMongo...')\n assert [item['text'] for item in mongodb.engine[mongodb_dest.database][mongodb_dest.collection].find()] == DATA\n\n finally:\n logger.info('Dropping %s database...', mongodb_dest.database)\n mongodb.engine.drop_database(mongodb_dest.database)", "def test_mongodb_origin_simple(sdc_builder, sdc_executor, mongodb):\n pipeline_builder = sdc_builder.get_pipeline_builder()\n pipeline_builder.add_error_stage('Discard')\n\n mongodb_origin = pipeline_builder.add_stage('MongoDB', type='origin')\n mongodb_origin.set_attributes(capped_collection=False,\n database=get_random_string(ascii_letters, 5),\n collection=get_random_string(ascii_letters, 10))\n\n trash = pipeline_builder.add_stage('Trash')\n mongodb_origin >> trash\n pipeline = pipeline_builder.build().configure_for_environment(mongodb)\n\n try:\n # MongoDB and PyMongo add '_id' to the dictionary entries e.g. docs_in_database\n # when used for inserting in collection. Hence the deep copy.\n docs_in_database = copy.deepcopy(ORIG_DOCS)\n\n # Create documents in MongoDB using PyMongo.\n # First a database is created. 
Then a collection is created inside that database.\n # Then documents are created in that collection.\n logger.info('Adding documents into %s collection using PyMongo...', mongodb_origin.collection)\n mongodb_database = mongodb.engine[mongodb_origin.database]\n mongodb_collection = mongodb_database[mongodb_origin.collection]\n insert_list = [mongodb_collection.insert_one(doc) for doc in docs_in_database]\n assert len(insert_list) == len(docs_in_database)\n\n # Start pipeline and verify the documents using snaphot.\n sdc_executor.add_pipeline(pipeline)\n snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot\n sdc_executor.stop_pipeline(pipeline)\n rows_from_snapshot = [{record.value['value']['name']['sqpath'].lstrip('/'):\n record.value['value']['name']['value']}\n for record in snapshot[mongodb_origin].output]\n\n assert rows_from_snapshot == ORIG_DOCS\n\n finally:\n logger.info('Dropping %s database...', mongodb_origin.database)\n mongodb.engine.drop_database(mongodb_origin.database)", "def switch_availability_zone():\n global current_az\n if current_az == 0:\n current_az = 1\n else:\n current_az = 0", "def migrate_contract(network):\n print(network)", "def bind(self,cluster_name,ip_address='',bind_details={},project_id=''):\n project_id = project_id if project_id != '' else self.__project_id\n if ip_address == '':\n headers = { 'User-Agent': 'curl/7.61.0'} # spoof for simple response\n ip = requests.get('http://ifconfig.co', headers)\n ip_address = ip.text.rstrip()\n logger.info(f'bind: looked up ip address: {ip_address}')\n #key = self.create_programatic_apikey(description=description,project_id=project_id)\n db_user = { 'username' : 'foo'\n ,'password' : 'changeme'\n ,'databaseName' : 'admin'\n ,'roles' : [ {'databaseName' : 'admin', 'roleName' : 'dbAdminAnyDatabase'} ] \n }\n user = self.create_database_user(db_user,project_id=project_id) \n cluster = self.get_cluster(cluster_name)\n cs = cluster['mongoURIWithOptions'].split('/',1)\n #conn_str = f'{cs[0]//{key['publicKey']}:{key['privateKey']}@{cs[1]}'\n return conn_str", "def test_mongodb_origin_simple_with_BSONBinary(sdc_builder, sdc_executor, mongodb):\n\n ORIG_BINARY_DOCS = [\n {'data': binary.Binary(b'Binary Data Flute')},\n {'data': binary.Binary(b'Binary Data Oboe')},\n {'data': binary.Binary(b'Binary Data Violin')}\n ]\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n pipeline_builder.add_error_stage('Discard')\n\n mongodb_origin = pipeline_builder.add_stage('MongoDB', type='origin')\n mongodb_origin.set_attributes(capped_collection=False,\n database=get_random_string(ascii_letters, 5),\n collection=get_random_string(ascii_letters, 10))\n\n trash = pipeline_builder.add_stage('Trash')\n mongodb_origin >> trash\n pipeline = pipeline_builder.build().configure_for_environment(mongodb)\n\n try:\n # MongoDB and PyMongo add '_id' to the dictionary entries e.g. docs_in_database\n # when used for inserting in collection. Hence the deep copy.\n docs_in_database = copy.deepcopy(ORIG_BINARY_DOCS)\n\n # Create documents in MongoDB using PyMongo.\n # First a database is created. 
Then a collection is created inside that database.\n # Then documents are created in that collection.\n logger.info('Adding documents into %s collection using PyMongo...', mongodb_origin.collection)\n mongodb_database = mongodb.engine[mongodb_origin.database]\n mongodb_collection = mongodb_database[mongodb_origin.collection]\n insert_list = [mongodb_collection.insert_one(doc) for doc in docs_in_database]\n assert len(insert_list) == len(docs_in_database)\n\n # Start pipeline and verify the documents using snaphot.\n sdc_executor.add_pipeline(pipeline)\n snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot\n sdc_executor.stop_pipeline(pipeline)\n rows_from_snapshot = [{'data': str(record.value2['data'])} for record in snapshot[mongodb_origin].output]\n\n assert rows_from_snapshot == [{'data': str(record.get('data'))} for record in ORIG_BINARY_DOCS]\n\n finally:\n logger.info('Dropping %s database...', mongodb_origin.database)\n mongodb.engine.drop_database(mongodb_origin.database)", "def test_12_migrate_vm_live_with_snapshots_on_remote(self):\n global vm2\n # Get ROOT Volume\n vol_for_snap = list_volumes(\n self.apiclient,\n virtualmachineid=vm2.id,\n listall=True)\n for vol in vol_for_snap:\n snapshot = Snapshot.create(\n self.apiclient,\n volume_id=vol.id\n )\n snapshot.validateState(\n self.apiclient,\n snapshotstate=\"backedup\",\n )\n # Migrate all volumes and VMs\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm2 = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_2, destinationHost)\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)", "def migrate_database():\n log('Migrating the keystone database.', level=INFO)\n service_stop(keystone_service())\n # NOTE(jamespage) > icehouse creates a log file as root so use\n # sudo to execute as keystone otherwise keystone won't start\n # afterwards.\n cmd = ['sudo', '-u', 'keystone', 'keystone-manage', 'db_sync']\n subprocess.check_output(cmd)\n service_start(keystone_service())\n time.sleep(10)\n peer_store('db-initialised', 'True')", "def connect_and_update(_id, padding, host, port, dbname, collname,\n updates_per_process, process_number, replica_set):\n client = MongoClient(host=[get_hostport_string(host=host, port=port)],\n replicaset=replica_set)\n db = client[dbname]\n collection = db[collname]\n try: # Unless using multiple docs, most of these will fail\n collection.insert_one({\"_id\": _id, \"padding\": padding})\n except:\n pass\n\n for j in xrange(updates_per_process):\n update_document(_id, collection, padding, process_number)\n \n client.close()", "def migrate_to_other_zone(\n self,\n request: dds_20151201_models.MigrateToOtherZoneRequest,\n ) -> dds_20151201_models.MigrateToOtherZoneResponse:\n runtime = util_models.RuntimeOptions()\n return self.migrate_to_other_zone_with_options(request, runtime)", "def test_transform_and_load_vpcs(neo4j_session):\n vpc_res = tests.data.gcp.compute.VPC_RESPONSE\n vpc_list = cartography.intel.gcp.compute.transform_gcp_vpcs(vpc_res)\n cartography.intel.gcp.compute.load_gcp_vpcs(neo4j_session, vpc_list, TEST_UPDATE_TAG)\n\n query = \"\"\"\n MATCH(vpc:GCPVpc{id:$VpcId})\n RETURN vpc.id, vpc.partial_uri, vpc.auto_create_subnetworks\n \"\"\"\n expected_vpc_id = 
'projects/project-abc/global/networks/default'\n nodes = neo4j_session.run(\n query,\n VpcId=expected_vpc_id,\n )\n actual_nodes = {(n['vpc.id'], n['vpc.partial_uri'], n['vpc.auto_create_subnetworks']) for n in nodes}\n expected_nodes = {\n (expected_vpc_id, expected_vpc_id, True),\n }\n assert actual_nodes == expected_nodes", "def to_network_v4(zone: Zone) -> ipaddress.IPv4Network:\n\n labels = zone.name.split(\".\")[:-3]\n netmask: int = 8 * len(labels)\n offset = 4 - len(labels)\n\n pattern = r\"^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)([/-](2[5-9]|3[0-1]))?$\"\n last_label_parsed = re.search(pattern, labels[0])\n if not last_label_parsed:\n raise ValueError(\"Faild to parse the zone name\")\n\n if last_label_parsed[2]:\n # non-octet boundary delegation detected\n # remove netmask and save it to the result\n last_octect = last_label_parsed[1]\n labels[0] = last_octect\n netmask = int(last_label_parsed[2][1:])\n\n labels = [\"0\"] * offset + labels\n prefix_str = \".\".join(reversed(labels))\n prefix_str += f\"/{netmask}\"\n\n return ipaddress.IPv4Network(prefix_str, strict=True)", "def _resolve_shard(client):\n status = client.admin.command('serverStatus')\n if status['process'] == 'mongos':\n raise RuntimeError(\"Destination cannot be mongos\")\n return client", "def migrateVirtualMachine(self,node,vmid,target,online=False,force=False):\n post_data = {'target': str(target)}\n if online:\n post_data['online'] = '1'\n if force:\n post_data['force'] = '1'\n data = self.connect('post',\"nodes/%s/qemu/%s/migrate\" % (node,vmid), post_data)\n return data", "def setup_source_db(self):\n conn = MongoReplicaSetClient(host=self._source_host,\n replicaSet=self._replica_set,\n read_preference=ReadPreference.PRIMARY)\n conn['admin'].authenticate(self._user, self._password)\n return conn", "def change_address(\n vm_hostname, new_address,\n offline=False, migrate=False, allow_reserved_hv=False,\n offline_transport='drbd',\n):\n\n if not offline:\n raise IGVMError('IP address change can be only performed offline')\n\n with _get_vm(vm_hostname) as vm:\n if vm.dataset_obj['datacenter_type'] != 'kvm.dct':\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n new_address = ip_address(new_address)\n\n if vm.dataset_obj['intern_ip'] == new_address:\n raise ConfigError('New IP address is the same as the old one!')\n\n if not vm.hypervisor.get_vlan_network(new_address) and not migrate:\n err = 'Current hypervisor does not support new subnet!'\n raise ConfigError(err)\n\n new_network = Query(\n {\n 'servertype': 'route_network',\n 'state': 'online',\n 'network_type': 'internal',\n 'intern_ip': Contains(new_address),\n }\n ).get()['hostname']\n\n vm_was_running = vm.is_running()\n\n with Transaction() as transaction:\n if vm_was_running:\n vm.shutdown(\n transaction=transaction,\n check_vm_up_on_transaction=False,\n )\n vm.change_address(\n new_address, new_network, transaction=transaction,\n )\n\n if migrate:\n vm_migrate(\n vm_object=vm,\n run_puppet=True,\n offline=True,\n no_shutdown=True,\n allow_reserved_hv=allow_reserved_hv,\n offline_transport=offline_transport,\n )\n else:\n vm.hypervisor.mount_vm_storage(vm, transaction=transaction)\n vm.run_puppet()\n vm.hypervisor.redefine_vm(vm)\n vm.hypervisor.umount_vm_storage(vm)\n\n if vm_was_running:\n vm.start()", "def migrate_replica(replica, location, noRemove=False, mirror=False):\n\n from tardis.tardis_portal.models import Replica, Location\n\n with transaction.commit_on_success():\n 
replica = Replica.objects.select_for_update().get(pk=replica.pk)\n source = Location.get_location(replica.location.name)\n\n if not replica.verified or location.provider.trust_length:\n raise MigrationError('Only verified datafiles can be migrated' \\\n ' to this destination')\n\n filename = replica.get_absolute_filepath()\n try:\n newreplica = Replica.objects.get(datafile=replica.datafile,\n location=location)\n created_replica = False\n # We've most likely mirrored this file previously. But if\n # we are about to delete the source Replica, we need to check\n # that the target Replica still verifies.\n if not mirror and not check_file_transferred(newreplica, location):\n raise MigrationError('Previously mirrored / migrated Replica' \\\n ' no longer verifies locally!')\n except Replica.DoesNotExist:\n newreplica = Replica()\n newreplica.location = location\n newreplica.datafile = replica.datafile\n newreplica.protocol = ''\n newreplica.stay_remote = location != Location.get_default_location()\n newreplica.verified = False\n url = location.provider.generate_url(newreplica)\n\n if newreplica.url == url:\n # We should get here ...\n raise MigrationError('Cannot migrate a replica to its' \\\n ' current location')\n newreplica.url = url\n location.provider.put_file(replica, newreplica)\n verified = False\n try:\n verified = check_file_transferred(newreplica, location)\n except:\n # FIXME - should we always do this?\n location.provider.remove_file(newreplica)\n raise\n\n newreplica.verified = verified\n newreplica.save()\n logger.info('Transferred file %s for replica %s' %\n (filename, replica.id))\n created_replica = True\n\n if mirror:\n return created_replica\n\n # FIXME - do this more reliably ...\n replica.delete()\n if not noRemove:\n source.provider.remove_file(replica)\n logger.info('Removed local file %s for replica %s' %\n (filename, replica.id))\n return True", "def connect_to_mongo(self, host='127.0.0.1', port=27017, instance='local'):\n if instance == 'prod':\n logging.info('connecting to mongo Atlas')\n self.db_client = MongoClient('mongodb+srv://{}:{}@{}/'\n '{}?retryWrites=true&w=majority'.format(self.config['db']['username'],\n self.config['db']['password'],\n self.config['db']['atlas'],\n self.db_name))\n else:\n logging.info('connecting to local Atlas')\n self.db_client = MongoClient(host, port)", "def move_ips_to_interface(apps, schema_editor):\n UserAS = apps.get_model('scionlab', 'UserAS')\n\n for useras in UserAS.objects.iterator():\n # UserASes have a unique host and before the multi-AP feature had a unique interface\n host = useras.hosts.get()\n iface = useras.interfaces.get()\n if not iface.public_ip:\n iface.public_ip = host.public_ip\n iface.bind_ip = host.bind_ip\n iface.save()\n host.public_ip = None\n host.bind_ip = None\n host.save()", "def upgrade_to_2():\n\n def update_file_origins(cont_list, cont_name):\n for container in cont_list:\n updated_files = []\n for file in container.get('files', []):\n origin = file.get('origin')\n if origin is not None:\n if origin.get('name', None) is None:\n file['origin']['name'] = origin['id']\n if origin.get('method', None) is None:\n file['origin']['method'] = ''\n updated_files.append(file)\n\n query = {'_id': container['_id']}\n update = {'$set': {'files': updated_files}}\n result = config.db[cont_name].update_one(query, update)\n\n query = {'$and':[{'files.origin.name': { '$exists': False}}, {'files.origin.id': { '$exists': True}}]}\n\n update_file_origins(config.db.collections.find(query), 'collections')\n 
update_file_origins(config.db.projects.find(query), 'projects')\n update_file_origins(config.db.sessions.find(query), 'sessions')\n update_file_origins(config.db.acquisitions.find(query), 'acquisitions')", "def try_atlas():\n result = {}\n try:\n vcap_services = os.getenv('VCAP_SERVICES')\n services = json.loads(vcap_services)\n for service_name in services.keys():\n print(f'service_name={service_name}')\n if service_name == \"_\":\n continue\n credentials = load_from_vcap_services(service_name)\n result.update(credentials)\n except Exception as err:\n print( f'Error looking for VCAP_SERVICES {err}')\n result['error']=err\n return result\n\n mongo_results = {}\n try:\n db = MongoClient( result['connectionString'] )\n mongo_results[\"MongoClient\"]= f'{db}'\n mongo_results[\"server_info\"]=db.server_info()\n except Exception as err:\n print( f'Error trying connection to Atlas: {err}')\n result['atlas-error']=err\n finally:\n result['mongo']=mongo_results\n\n return result", "def test_transform_and_load_gcp_instances_and_nics(neo4j_session):\n instance_responses = [tests.data.gcp.compute.GCP_LIST_INSTANCES_RESPONSE]\n instance_list = cartography.intel.gcp.compute.transform_gcp_instances(instance_responses)\n cartography.intel.gcp.compute.load_gcp_instances(neo4j_session, instance_list, TEST_UPDATE_TAG)\n\n instance_id1 = 'projects/project-abc/zones/europe-west2-b/instances/instance-1-test'\n instance_id2 = 'projects/project-abc/zones/europe-west2-b/instances/instance-1'\n\n nic_query = \"\"\"\n MATCH(i:GCPInstance)-[r:NETWORK_INTERFACE]->(nic:GCPNetworkInterface)\n OPTIONAL MATCH (i)-[:TAGGED]->(t:GCPNetworkTag)\n RETURN i.id, i.zone_name, i.project_id, i.hostname, t.value, r.lastupdated, nic.nic_id, nic.private_ip\n \"\"\"\n objects = neo4j_session.run(nic_query)\n actual_nodes = {\n (\n o['i.id'],\n o['i.zone_name'],\n o['i.project_id'],\n o['nic.nic_id'],\n o['nic.private_ip'],\n o['t.value'],\n o['r.lastupdated'],\n ) for o in objects\n }\n\n expected_nodes = {\n (\n instance_id1,\n 'europe-west2-b',\n 'project-abc',\n 'projects/project-abc/zones/europe-west2-b/instances/instance-1-test/networkinterfaces/nic0',\n '10.0.0.3',\n None,\n TEST_UPDATE_TAG,\n ),\n (\n instance_id2,\n 'europe-west2-b',\n 'project-abc',\n 'projects/project-abc/zones/europe-west2-b/instances/instance-1/networkinterfaces/nic0',\n '10.0.0.2',\n 'test',\n TEST_UPDATE_TAG,\n ),\n }\n assert actual_nodes == expected_nodes", "def __init__(self, dst_mongodb_uri, dst_database, dst_collection, dry_run):\n self.client = pymongo.MongoClient(dst_mongodb_uri)\n self.dst_mongodb_uri = dst_mongodb_uri\n self.lookup_col = self.client[dst_database][dst_collection]\n self.dry_run = dry_run", "def mongoRestore( self, db, infile ):\n\t\tsys_command = \"mongorestore --db \" + db + \" --host \" + self.host + \" --port \" + str( self.port ) + \" \" + infile \n\t\tos.system(sys_command)", "def test_migrate_volume_generic_cross_az(self, migrate_volume_completion,\n nova_api):\n original_create = objects.Volume.create\n dst_az = 'AZ2'\n db.service_update(self.context, self._service.id,\n {'availability_zone': dst_az})\n\n def my_create(self, *args, **kwargs):\n self.status = 'available'\n original_create(self, *args, **kwargs)\n\n volume = tests_utils.create_volume(self.context, size=1,\n host=CONF.host)\n\n host_obj = {'host': 'newhost', 'capabilities': {}}\n create_vol = self.patch('cinder.objects.Volume.create',\n side_effect=my_create, autospec=True)\n\n with mock.patch.object(self.volume, '_copy_volume_data') as copy_mock:\n 
self.volume._migrate_volume_generic(self.context, volume, host_obj,\n None)\n copy_mock.assert_called_with(self.context, volume, mock.ANY,\n remote='dest')\n migrate_volume_completion.assert_called_with(\n self.context, volume, mock.ANY, error=False)\n\n nova_api.return_value.update_server_volume.assert_not_called()\n\n self.assertEqual(dst_az,\n create_vol.call_args[0][0]['availability_zone'])", "def migrate_to_other_zone_with_options(\n self,\n request: dds_20151201_models.MigrateToOtherZoneRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.MigrateToOtherZoneResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.effective_time):\n query['EffectiveTime'] = request.effective_time\n if not UtilClient.is_unset(request.instance_id):\n query['InstanceId'] = request.instance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.v_switch_id):\n query['VSwitchId'] = request.v_switch_id\n if not UtilClient.is_unset(request.zone_id):\n query['ZoneId'] = request.zone_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='MigrateToOtherZone',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.MigrateToOtherZoneResponse(),\n self.call_api(params, req, runtime)\n )", "def test_vm_migration_across_hosts(self):\n\n # Create security group for the server\n group_create_body_update, _ = self._create_security_group()\n\n # Create server with security group\n name = data_utils.rand_name('server-with-security-group')\n server_id = self._create_server_with_sec_group(\n name, self.network['id'],\n group_create_body_update['security_group']['id'])\n self.assertTrue(self.verify_portgroup(self.network['id'], server_id))\n device_port = self.ports_client.list_ports(device_id=server_id)\n port_id = device_port['ports'][0]['id']\n floating_ip = self._associate_floating_ips(port_id=port_id)\n self.assertTrue(self.ping_ip_address(\n floating_ip['floatingip']['floating_ip_address'],\n should_succeed=False))\n\n # Update security group rule for the existing security group\n self.security_group_rules_client.create_security_group_rule(\n security_group_id=group_create_body_update['security_group']['id'],\n protocol='icmp',\n direction='ingress',\n ethertype=self.ethertype\n )\n self.assertTrue(self.ping_ip_address(\n floating_ip['floatingip']['floating_ip_address'],\n should_succeed=True))\n cluster = cfg.CONF.VCENTER.cluster_in_use\n content = self._create_connection()\n host_dic = self._get_host_name(server_id)\n vm_host = host_dic['host_name']\n vm_host_ip = vm_host.name\n cluster_hosts = self._get_hosts_for_cluster(content, cluster)\n if len(cluster_hosts.host) < 2:\n msg = \"Min two hosts needed in cluster for Vmotion\"\n raise testtools.TestCase.skipException(msg)\n for host in cluster_hosts.host:\n if host.name != vm_host_ip:\n dest_host = host\n # Live Migration\n task = self._migrate_vm(content, server_id, 
dest_host)\n self._wait_for_task(task, content)\n self.assertTrue(self.ping_ip_address(\n floating_ip['floatingip']['floating_ip_address'],\n should_succeed=True))", "def setup_target_db(self):\n conn = MongoClient(host=self._target_host)\n conn['admin'].authenticate(self._user, self._password)\n return conn", "def dvs_remote_ip_prefix(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n self.show_step(2)\n net_1 = os_conn.create_network(network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network are created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network.\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n self.show_step(3)\n self.show_step(4)\n self.show_step(5)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n access_point_1, access_point_ip_1 = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n security_groups=[security_group.name, sg1.name])\n\n access_point_2, access_point_ip_2 = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n security_groups=[security_group.name, sg2.name])\n\n self.show_step(6)\n _sg_rules = os_conn.neutron.list_security_group_rules()\n sg_rules = [sg_rule for sg_rule in _sg_rules['security_group_rules']\n if sg_rule['security_group_id'] in [sg1.id, sg2.id]]\n for rule in sg_rules:\n os_conn.neutron.delete_security_group_rule(rule['id'])\n\n self.show_step(7)\n for rule in [self.icmp, self.tcp]:\n rule[\"security_group_rule\"][\"security_group_id\"] = sg1.id\n rule[\"security_group_rule\"][\"remote_ip_prefix\"] = access_point_ip_1\n rule[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(rule)\n rule[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(rule)\n\n # Get private ip of access_point_2\n private_ip = os_conn.get_nova_instance_ip(access_point_2,\n net_name=net_1['name'])\n\n self.show_step(8)\n self.tcp[\"security_group_rule\"][\"security_group_id\"] = sg2.id\n self.tcp[\"security_group_rule\"][\"remote_ip_prefix\"] = private_ip\n os_conn.neutron.create_security_group_rule(self.tcp)\n self.tcp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n\n self.show_step(9)\n istances_sg1 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[sg1.name])\n\n istances_sg2 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[sg2.name])\n openstack.verify_instance_state(os_conn)\n\n # Get private ips of instances\n ips = {\n 
'SG1': [os_conn.assign_floating_ip(i).ip for i in istances_sg1],\n 'SG2': [os_conn.get_nova_instance_ip(i, net_name=net_1['name'])\n for i in istances_sg2]\n }\n\n self.show_step(10)\n ip_pair = dict.fromkeys(ips['SG1'])\n for key in ip_pair:\n ip_pair[key] = [access_point_ip_1]\n openstack.check_connection_through_host(access_point_ip_1,\n ip_pair,\n timeout=60 * 4)\n\n self.show_step(11)\n ip_pair = dict.fromkeys(ips['SG1'])\n for key in ip_pair:\n ip_pair[key] = [value for value in ips['SG1'] if key != value]\n openstack.check_connection_through_host(\n access_point_ip_1, ip_pair, result_of_command=1)\n\n self.show_step(12)\n self.show_step(13)\n ip_pair = dict.fromkeys(ips['SG2'])\n for key in ip_pair:\n ip_pair[key] = [value for value in ips['SG2'] if key != value]\n openstack.check_connection_through_host(\n access_point_ip_2, ip_pair, result_of_command=1)", "def get_mongo_config ( subnets ) :\n replications = \"\"\n primary_ip = get_primary_node(subnets)\n for subnet in subnets :\n if primary_ip != subnet.cidr_block :\n replication = get_static_ip(subnet.cidr_block, \"0/24\", mongo_ip_block)\n replication = replication.replace(\".\", \"-\")\n replications = replications + \"\\nrs.add(\\\"ip-\"+replication+\":27017\\\");\"\n \n \n return \"\"\"#!/bin/bash -ex\n exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1\n:>/etc/replication.js\necho 'rs.initiate();\"\"\"+replications+\"\"\"\n\n'>>/etc/replication.js\n\"\"\"", "def convert_container_to_replica(\n self,\n replica_name: str,\n active_container: docker.models.containers.Container,\n passive_container: docker.models.containers.Container) -> list[docker.models.images.Image]:\n new_replica_name = self.sanitize_replica_name(replica_name)\n replica_list = []\n container_list = [\n active_container, passive_container] if passive_container else [active_container]\n\n logger.info(\n f'Creating new replica image with name {new_replica_name}...')\n\n for container in container_list:\n try:\n self.client.images.remove(new_replica_name, force=True)\n except docker.errors.ImageNotFound:\n pass\n\n container_arch = container.name.split('_')[-1]\n\n # commit with arch tag\n replica = container.commit(\n repository=new_replica_name, tag=container_arch)\n replica_list.append(replica)\n\n logger.info(\n f'Replica image {replica.tags[0]} created. 
Cleaning up...')\n self.remove_container(container.name)\n\n for replica in replica_list:\n if replica.attrs.get('Architecture') == LOCAL_ARCHITECTURE:\n local_arch_replica = replica\n local_arch_replica.tag(\n repository=new_replica_name, tag='latest')\n\n # this is done due to how recomitting existing image is not reflected in 'replica_list' var\n actual_replica_list = self.client.images.list(new_replica_name)\n\n return actual_replica_list", "def upgrade_to_10():\n\n def switch_keys(doc, x, y):\n doc[y] = doc[x]\n doc.pop(x, None)\n\n\n jobs = config.db.jobs.find({'destination.container_type': {'$exists': True}})\n\n for job in jobs:\n switch_keys(job, 'algorithm_id', 'name')\n\n for key in job['inputs'].keys():\n inp = job['inputs'][key]\n\n switch_keys(inp, 'container_type', 'type')\n switch_keys(inp, 'container_id', 'id')\n switch_keys(inp, 'filename', 'name')\n\n\n dest = job['destination']\n switch_keys(dest, 'container_type', 'type')\n switch_keys(dest, 'container_id', 'id')\n\n config.db.jobs.update(\n {'_id': job['_id']},\n job\n )", "def mongo_connect(\n password,\n user='RCD2021',\n dbname='myFirstDatabase'\n):\n\n client = pymongo.MongoClient(f\"mongodb+srv://{user}:{password}\\\n@cluster0.z3tac.mongodb.net/{dbname}?retryWrites=true&w=majority\")\n db = client.test\n\n return db", "def restore_cluster(ctx, zone, db_instance, from_zone=None, from_db_instance=None, backup_folder=None, target_time=None):\n\n if from_zone == None:\n from_zone = zone\n if from_db_instance == None:\n from_db_instance = db_instance\n if backup_folder == None:\n get_env('AWS_SECRET_ACCESS_KEY', 'to list the backup buckets at AWS S3.')\n get_env('AWS_ACCESS_KEY_ID', 'to list the backup buckets at AWS S3.')\n get_env('AWS_REGION', 'to list the backup buckets at AWS S3.')\n print(\"Available values for --backup-folder :\\n\")\n res = ctx.run(\"aws s3 ls \" + backup_bucket_name(from_zone, from_db_instance), pty=True, hide=\"stdout\")\n for line in res.stdout.splitlines():\n print(re.search(\"PRE ([^ /]+)\", line).group(1))\n else:\n recover_from = \"{}/{}\".format(backup_bucket_name(from_zone, from_db_instance), backup_folder)\n print(\"\"\"\n Starting recovery\n \"\"\")\n more_vars = {'recover_from': recover_from}\n if target_time:\n more_vars['recovery_target_time'] = '\"{}\"'.format(target_time) # need quoting due to space char\n\n ctx.run(init_pg_servers_play_run(zone, db_instance, more_vars=more_vars), pty=True, echo=True)", "def _activate_new_zone(self):\n if ((not hasattr(self, '_current_zone')) or (not self._current_zone)) or ((not hasattr(self, '_new_zone_version_number')) or (not self._new_zone_version_number)):\n raise GandiApiException(\"Can't update record, no cloned zone available.\")\n success = self._api.domain.zone.version.set(self._api_key, self._current_zone['id'], \n self._new_zone_version_number)\n if not success:\n raise GandiApiException('Failed to activate new zone;')\n else:\n logging.info('New zone version activated.')", "def test_deploy_instance_with_new_network(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 252\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr)", "def init_mongo_db():\n try:\n app.mongo.cx.server_info()\n app.mongo.db = app.mongo.cx[\"kamistudio\"]\n if \"kami_corpora\" not in app.mongo.db.collection_names():\n 
app.mongo.db.create_collection(\"kami_corpora\")\n app.mongo.db.kami_corpora.create_index(\"id\", unique=True)\n\n if \"kami_models\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_models\")\n app.mongo.db.kami_models.create_index(\"id\", unique=True)\n\n if \"kami_definitions\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_definitions\")\n app.mongo.db.kami_definitions.create_index(\"id\", unique=True)\n\n if \"kami_new_definitions\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_new_definitions\")\n\n except ServerSelectionTimeoutError as e:\n app.mongo.db = None", "def test_relic():\n mongo_db = pymongo.MongoClient()\n init_db(mongo_db.roguesim_python)\n populate_db(mongo_db.roguesim_python)", "def test_07_migrate_vm_live_with_snapshots(self):\n global vm\n # Get ROOT Volume\n vol_for_snap = list_volumes(\n self.apiclient,\n virtualmachineid=vm.id,\n listall=True)\n for vol in vol_for_snap:\n snapshot = Snapshot.create(\n self.apiclient,\n volume_id=vol.id\n )\n snapshot.validateState(\n self.apiclient,\n snapshotstate=\"backedup\",\n )\n # Migrate all volumes and VMs\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm, self.host)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_1, destinationHost)\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm, self.host)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)", "def dns_sync(self, args):\r\n dns = DNSManager(self.client)\r\n vsi = VSManager(self.client)\r\n\r\n vs_id = resolve_id(vsi.resolve_ids, args.get('<identifier>'), 'VS')\r\n instance = vsi.get_instance(vs_id)\r\n zone_id = resolve_id(dns.resolve_ids, instance['domain'], name='zone')\r\n\r\n def sync_a_record():\r\n \"\"\" Sync A record \"\"\"\r\n records = dns.get_records(\r\n zone_id,\r\n host=instance['hostname'],\r\n )\r\n\r\n if not records:\r\n # don't have a record, lets add one to the base zone\r\n dns.create_record(\r\n zone['id'],\r\n instance['hostname'],\r\n 'a',\r\n instance['primaryIpAddress'],\r\n ttl=args['--ttl'])\r\n else:\r\n recs = [x for x in records if x['type'].lower() == 'a']\r\n if len(recs) != 1:\r\n raise CLIAbort(\"Aborting A record sync, found %d \"\r\n \"A record exists!\" % len(recs))\r\n rec = recs[0]\r\n rec['data'] = instance['primaryIpAddress']\r\n rec['ttl'] = args['--ttl']\r\n dns.edit_record(rec)\r\n\r\n def sync_ptr_record():\r\n \"\"\" Sync PTR record \"\"\"\r\n host_rec = instance['primaryIpAddress'].split('.')[-1]\r\n ptr_domains = self.client['Virtual_Guest'].\\\r\n getReverseDomainRecords(id=instance['id'])[0]\r\n edit_ptr = None\r\n for ptr in ptr_domains['resourceRecords']:\r\n if ptr['host'] == host_rec:\r\n ptr['ttl'] = args['--ttl']\r\n edit_ptr = ptr\r\n break\r\n\r\n if edit_ptr:\r\n edit_ptr['data'] = instance['fullyQualifiedDomainName']\r\n dns.edit_record(edit_ptr)\r\n else:\r\n dns.create_record(\r\n ptr_domains['id'],\r\n host_rec,\r\n 'ptr',\r\n instance['fullyQualifiedDomainName'],\r\n ttl=args['--ttl'])\r\n\r\n if not instance['primaryIpAddress']:\r\n raise CLIAbort('No primary IP address associated with this VS')\r\n\r\n zone = dns.get_zone(zone_id)\r\n\r\n go_for_it = args['--really'] or confirm(\r\n \"Attempt to update DNS records for %s\"\r\n % instance['fullyQualifiedDomainName'])\r\n\r\n if not 
go_for_it:\r\n raise CLIAbort(\"Aborting DNS sync\")\r\n\r\n both = False\r\n if not args['--ptr'] and not args['-a']:\r\n both = True\r\n\r\n if both or args['-a']:\r\n sync_a_record()\r\n\r\n if both or args['--ptr']:\r\n sync_ptr_record()", "def test_06_migrate_vm_live_attach_disk(self):\n \n global vm\n global data_disk_1\n data_disk_1 = self.helper.create_custom_disk(\n self.apiclient,\n {\"diskname\":\"StorPoolDisk\" },\n zoneid=self.zone.id,\n size = 5,\n miniops = 2000,\n maxiops = 5000,\n account=self.account.name,\n domainid=self.account.domainid,\n diskofferingid=self.disk_offerings.id,\n )\n\n self.debug(\"Created volume with ID: %s\" % data_disk_1.id)\n\n self.virtual_machine_live_migration_1.attach_volume(\n self.apiclient,\n data_disk_1\n )\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm, self.host)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_1, destinationHost)\n\n\n self.virtual_machine_live_migration_1.attach_volume(\n self.apiclient,\n self.volume\n )\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient,vm, self.host)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_1, destinationHost)\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient,vm, self.host)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)", "def load_mongo_configuration(ec2_conn,base_name,params ):\n print \"loading mongo configurings\"\n \n ## Allow security from build server to mongodb\n app_type = 'MONGO'\n \n mongo_sec_grp_name = get_secgrp_name( base_name, app_type )\n mongo_sec_grp = find_secgrp(ec2_conn, mongo_sec_grp_name)\n \n try :\n mongo_sec_grp.authorize( ip_protocol = \"tcp\",\n from_port = 27017,\n to_port = 27017,\n cidr_ip = build_server_cidr )\n except :\n print \"rule exists aready\" \n \n mongo_host = params.get( 'host' )\n mongo_port = params.get( 'port' )\n mongo_username = params.get( 'user-name' )\n mongo_password = params.get( 'password' )\n \n db_name = params.get( 'db_name' )\n collection_name = params.get( 'collection_name' )\n \n documents = params.get( 'documents' )\n \n uri = \"\"\n if len( mongo_username ) > 0 :\n uri = \"mongodb://\"+mongo_username+\":\"+mongo_password+\"@\"+mongo_host+\":\"+mongo_port+\"/\"\n else :\n uri = \"mongodb://\"+mongo_host+\":\"+mongo_port+\"/\"\n \n print \"Mongo Connect URL:\" +uri\n \n \n client = MongoClient(uri)\n \n\n db = client[db_name]\n collection = db[collection_name ]\n \n collection.remove()\n \n for document in documents :\n document = json.dumps(document)\n document = loads(document)\n collection.insert(document)\n document['createdTime'] = datetime.datetime.utcnow()\n collection.save(document)\n \n ## At the end revoke the build server rule \n try :\n mongo_sec_grp.revoke( ip_protocol = \"tcp\",\n from_port = 27017,\n to_port = 27017,\n cidr_ip = build_server_cidr)\n \n except :\n print \"exception removing rule\"\n \n print \"configured\"", "def igraph2mongo(graph,collection,mode='OUT',overwrite = False):\r\n for i in graph.vs:\r\n if not list(collection.find({'_id':i.index})):\r\n post = {\"_id\": i.index,\r\n \"neighbors_{}\".format(mode):list(set(graph.neighbors(i.index,mode=mode)))}\r\n post_id = collection.insert_one(post).inserted_id\r\n print( \"node \",post_id,\" 
added\")\r\n elif overwrite == True:\r\n post = {\"_id\": i.index,\r\n \"neighbors_{}\".format(mode):list(set(graph.neighbors(i.index,mode=mode)))}\r\n collection.replace_one({'_id':i.index},post)\r\n print(\"node \",i.index,\" replaced\")\r\n else:\r\n# print(\"THIS object has the _id\",i.index,list(collection.find({'_id':i.index})))\r\n pass\r\n if overwrite == True:\r\n print(collection, \"has been changed\")", "def swapdb(self, *args, **kwargs) -> NoReturn:\n raise RedisClusterException(\"SWAPDB is not supported in cluster mode\")", "def test_13_migrate_vm_live_resize_volume_on_remote(self):\n global vm2\n global data_disk_2\n\n vol = self.helper.resize_volume(apiclient = self.apiclient, volume = data_disk_1, shrinkOk = False, maxiops = 15000)\n\n # Migrate all volumes and VMs\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm2 = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_2, destinationHost)\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)", "def _regrid_ak_ext_ana_pcp_stage4(supplemental_precip, config_options, wrf_hydro_geo_meta, mpi_config):\n\n # If the expected file is missing, this means we are allowing missing files, simply\n # exit out of this routine as the regridded fields have already been set to NDV.\n if not os.path.exists(supplemental_precip.file_in1):\n return\n\n # Check to see if the regrid complete flag for this\n # output time step is true. This entails the necessary\n # inputs have already been regridded and we can move on.\n if supplemental_precip.regridComplete:\n if mpi_config.rank == 0:\n config_options.statusMsg = \"No StageIV regridding required for this timestep.\"\n err_handler.log_msg(config_options, mpi_config)\n return\n\n # Create a path for a temporary NetCDF files that will\n # be created through the wgrib2 process.\n stage4_tmp_nc = config_options.scratch_dir + \"/STAGEIV_TMP-{}.nc\".format(mkfilename())\n\n lat_var = \"latitude\"\n lon_var = \"longitude\"\n\n if supplemental_precip.fileType != NETCDF:\n # This file shouldn't exist.... 
but if it does (previously failed\n # execution of the program), remove it.....\n if mpi_config.rank == 0:\n if os.path.isfile(stage4_tmp_nc):\n config_options.statusMsg = \"Found old temporary file: \" + stage4_tmp_nc + \" - Removing.....\"\n err_handler.log_warning(config_options, mpi_config)\n try:\n os.remove(stage4_tmp_nc)\n except OSError:\n config_options.errMsg = f\"Unable to remove temporary file: {stage4_tmp_nc}\"\n err_handler.log_critical(config_options, mpi_config)\n err_handler.check_program_status(config_options, mpi_config)\n\n # Create a temporary NetCDF file from the GRIB2 file.\n cmd = f'$WGRIB2 -match \"APCP:surface:0-6 hour acc fcst\" {supplemental_precip.file_in2} -netcdf {stage4_tmp_nc}'\n if mpi_config.rank == 0:\n config_options.statusMsg = f\"WGRIB2 command: {cmd}\"\n err_handler.log_msg(config_options, mpi_config)\n id_tmp = ioMod.open_grib2(supplemental_precip.file_in2, stage4_tmp_nc, cmd,\n config_options, mpi_config, inputVar=None)\n err_handler.check_program_status(config_options, mpi_config)\n else:\n create_link(\"STAGEIV-PCP\", supplemental_precip.file_in2, stage4_tmp_nc, config_options, mpi_config)\n id_tmp = ioMod.open_netcdf_forcing(stage4_tmp_nc, config_options, mpi_config, False, lat_var, lon_var)\n\n # Check to see if we need to calculate regridding weights.\n calc_regrid_flag = check_supp_pcp_regrid_status(id_tmp, supplemental_precip, config_options,\n wrf_hydro_geo_meta, mpi_config)\n err_handler.check_program_status(config_options, mpi_config)\n\n if calc_regrid_flag:\n if mpi_config.rank == 0:\n config_options.statusMsg = \"Calculating STAGE IV regridding weights.\"\n err_handler.log_msg(config_options, mpi_config)\n calculate_supp_pcp_weights(supplemental_precip, id_tmp, stage4_tmp_nc, config_options, mpi_config, lat_var, lon_var)\n err_handler.check_program_status(config_options, mpi_config)\n\n # Regrid the input variables.\n var_tmp = None\n if mpi_config.rank == 0:\n if mpi_config.rank == 0:\n config_options.statusMsg = f\"Regridding STAGE IV '{supplemental_precip.netcdf_var_names[-1]}' Precipitation.\"\n err_handler.log_msg(config_options, mpi_config)\n try:\n var_tmp = id_tmp.variables[supplemental_precip.netcdf_var_names[-1]][0,:,:]\n except (ValueError, KeyError, AttributeError) as err:\n config_options.errMsg = \"Unable to extract precipitation from STAGE IV file: \" + \\\n supplemental_precip.file_in1 + \" (\" + str(err) + \")\"\n err_handler.log_critical(config_options, mpi_config)\n err_handler.check_program_status(config_options, mpi_config)\n\n var_sub_tmp = mpi_config.scatter_array(supplemental_precip, var_tmp, config_options)\n err_handler.check_program_status(config_options, mpi_config)\n\n try:\n supplemental_precip.esmf_field_in.data[:, :] = var_sub_tmp\n except (ValueError, KeyError, AttributeError) as err:\n config_options.errMsg = \"Unable to place STAGE IV precipitation into local ESMF field: \" + str(err)\n err_handler.log_critical(config_options, mpi_config)\n err_handler.check_program_status(config_options, mpi_config)\n\n try:\n supplemental_precip.esmf_field_out = supplemental_precip.regridObj(supplemental_precip.esmf_field_in,\n supplemental_precip.esmf_field_out)\n except ValueError as ve:\n config_options.errMsg = \"Unable to regrid STAGE IV supplemental precipitation: \" + str(ve)\n err_handler.log_critical(config_options, mpi_config)\n err_handler.check_program_status(config_options, mpi_config)\n\n # Set any pixel cells outside the input domain to the global missing value.\n try:\n 
supplemental_precip.esmf_field_out.data[np.where(supplemental_precip.regridded_mask == 0)] = \\\n config_options.globalNdv\n except (ValueError, ArithmeticError) as npe:\n config_options.errMsg = \"Unable to run mask search on STAGE IV supplemental precipitation: \" + str(npe)\n err_handler.log_critical(config_options, mpi_config)\n err_handler.check_program_status(config_options, mpi_config)\n\n supplemental_precip.regridded_precip2[:, :] = supplemental_precip.esmf_field_out.data\n err_handler.check_program_status(config_options, mpi_config)\n\n # Convert the 6-hourly precipitation total to a rate of mm/s\n try:\n ind_valid = np.where(supplemental_precip.regridded_precip2 != config_options.globalNdv)\n supplemental_precip.regridded_precip2[ind_valid] = supplemental_precip.regridded_precip2[ind_valid] / 3600.0\n del ind_valid\n except (ValueError, ArithmeticError, AttributeError, KeyError) as npe:\n config_options.errMsg = \"Unable to run NDV search on STAGE IV supplemental precipitation: \" + str(npe)\n err_handler.log_critical(config_options, mpi_config)\n err_handler.check_program_status(config_options, mpi_config)\n\n # If we are on the first timestep, set the previous regridded field to be\n # the latest as there are no states for time 0.\n if config_options.current_output_step == 1:\n supplemental_precip.regridded_precip1[:, :] = \\\n supplemental_precip.regridded_precip2[:, :]\n err_handler.check_program_status(config_options, mpi_config)\n\n # Close the temporary NetCDF file and remove it.\n if mpi_config.rank == 0:\n try:\n id_tmp.close()\n except OSError:\n config_options.errMsg = \"Unable to close NetCDF file: \" + stage4_tmp_nc\n err_handler.log_critical(config_options, mpi_config)\n try:\n os.remove(stage4_tmp_nc)\n except OSError:\n config_options.errMsg = \"Unable to remove NetCDF file: \" + stage4_tmp_nc\n err_handler.log_critical(config_options, mpi_config)\n err_handler.check_program_status(config_options, mpi_config)", "def create_database():\n\n try:\n client = MongoClient(MONGO_URI,event_listeners=[CommandLogger()])\n db = client.get_database('UNSD')\n\n coll_ebal = db.get_collection('ebal')\n coll_unfcc = db.get_collection('unfcc')\n\n df_ebal = pd.read_csv(EBAL_FILE)\n df_unfcc = pd.read_csv(UNFCC_FILE)\n df_ebal = decoding_codes(df_ebal)\n\n coco_dict = {}\n for i in df_ebal[\"REF_AREA\"].unique():\n # if i not in coco_dict:\n coco_dict[i] = coco.convert(i, to='iso3')\n coco_dict[\"France-Monaco\"] = coco.convert(\"France\", to='iso3')\n coco_dict[\"Italy-San Marino\"] = coco.convert(\"Italy\", to='iso3')\n coco_dict[\"Switzerland-Liechtenstein\"] = coco.convert(\"Switzerland\", to='iso3')\n df_ebal[\"REF_AREA\"] = [coco_dict[i] for i in df_ebal[\"REF_AREA\"]]\n\n data_json_unfcc = json.loads(df_unfcc.to_json(orient='records'))\n data_json_ebal = json.loads(df_ebal.to_json(orient='records'))\n\n\n result = coll_ebal.insert_many(data_json_ebal)\n logger.info('Inserted a total of {} records in EBAL'.format(len(result.inserted_ids)))\n result = coll_unfcc.insert_many(data_json_unfcc)\n logger.info('Inserted a total of {} records in UNFCC'.format(len(result.inserted_ids)))\n\n except pymongo.errors.ConnectionFailure as e:\n logger.error('PyMongo error ConnectionFailure seen: ' + str(e))\n traceback.print_exc(file = sys.stdout)\n\n finally:\n client.close()", "def test_migrate_on_compute_fail(self):\n server, source_host, target_host = self._create_server()\n\n # Wrap _prep_resize so we can concurrently delete the server.\n original_prep_resize = 
compute_manager.ComputeManager._prep_resize\n\n def wrap_prep_resize(*args, **kwargs):\n self._delete_server(server)\n return original_prep_resize(*args, **kwargs)\n\n self.stub_out('nova.compute.manager.ComputeManager._prep_resize',\n wrap_prep_resize)\n\n # Now start the cold migration which will fail in the dest compute.\n self.api.post_server_action(server['id'], {'migrate': None})\n # We cannot monitor the migration from the API since it is deleted\n # when the instance is deleted so just wait for the failed instance\n # action event after the allocation revert happens.\n self._wait_for_action_fail_completion(\n server, instance_actions.MIGRATE, 'compute_prep_resize')\n self._assert_no_allocations(server)", "def test_update_virtualization_realm(self):\n pass", "def tear_down_mongo(self):\r\n split_db = self.split_mongo.db\r\n # old_mongo doesn't give a db attr, but all of the dbs are the same\r\n split_db.drop_collection(self.old_mongo.collection)", "def __init__(self, source='10.0.2.32', is_local=False):\n super().__init__(source, is_local)\n self.client = MongoClient(source)", "def _mongodump(self, port, errors):\n ret = run(\"mongodump %s --forceTableScan --host %s:%d -o %s/%s\" % (\"--oplog\" if self.name != \"config\" and self.name != \"mongos\" and not self.can_restart else \"\", self.host, port, self.backup_path, self.name))\n if ret != 0:\n errors.put(Exception(\"Error dumping %s server\" % self.name))\n traceback.print_exc()\n return\n\n ret = run(\"cd %s && tar zcvf %s.tar.gz %s && rm -rf %s\" % (self.backup_path, self.name, self.name, self.name))\n if ret != 0:\n errors.put(Exception(\"Error zipping %s server backup\" % self.name))\n traceback.print_exc()", "def ping(context):\n\n ssh_config = aws_infrastructure.tasks.ssh.SSHConfig.load(SSH_CONFIG_PATH)\n documentdb_config = aws_infrastructure.tasks.library.documentdb.DocumentDBConfig.load(DOCUMENTDB_CONFIG_PATH)\n\n with aws_infrastructure.tasks.ssh.SSHClientContextManager(\n ssh_config=ssh_config,\n ) as ssh_client:\n with aws_infrastructure.tasks.ssh.SSHPortForwardContextManager(\n ssh_client=ssh_client,\n remote_host=documentdb_config.endpoint,\n remote_port=documentdb_config.port,\n ) as ssh_port_forward:\n client = MongoClient(\n host=[\n 'localhost'\n ],\n port=ssh_port_forward.local_port,\n connect=True,\n username=documentdb_config.admin_user,\n password=documentdb_config.admin_password,\n tls=True,\n tlsInsecure=True,\n )\n\n print(client.admin.command('ping'))", "def test_11_migrate_vm_live_attach_disk_on_remote(self):\n \n global vm2\n global data_disk_2\n data_disk_2 = self.helper.create_custom_disk(\n self.apiclient,\n {\"diskname\":\"StorPoolDisk\" },\n zoneid=self.zone.id,\n size = 5,\n miniops = 2000,\n maxiops = 5000,\n account=self.account.name,\n domainid=self.account.domainid,\n diskofferingid=self.disk_offerings.id,\n )\n\n self.debug(\"Created volume with ID: %s\" % data_disk_2.id)\n\n self.virtual_machine_live_migration_2.attach_volume(\n self.apiclient,\n data_disk_2\n )\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm2 = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_2, destinationHost)\n\n\n self.virtual_machine_live_migration_2.attach_volume(\n self.apiclient,\n self.volume_2\n )\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n 
self.helper.check_storpool_volume_iops(self.spapi, v)\n vm2 = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_2, destinationHost)", "def test_port_update_after_vm_migration(self):\n # Create security group for the server\n group_create_body_update, _ = self._create_security_group()\n\n # Create server with security group\n name = data_utils.rand_name('server-with-security-group')\n server_id = self._create_server_with_sec_group(\n name, self.network['id'],\n group_create_body_update['security_group']['id'])\n self.assertTrue(self.verify_portgroup(self.network['id'], server_id))\n device_port = self.ports_client.list_ports(device_id=server_id)\n port_id = device_port['ports'][0]['id']\n floating_ip = self._associate_floating_ips(port_id=port_id)\n self.assertTrue(self.ping_ip_address(\n floating_ip['floatingip']['floating_ip_address'],\n should_succeed=False))\n cluster = cfg.CONF.VCENTER.cluster_in_use\n content = self._create_connection()\n host_dic = self._get_host_name(server_id)\n vm_host = host_dic['host_name']\n vm_host_ip = vm_host.name\n cluster_hosts = self._get_hosts_for_cluster(content, cluster)\n if len(cluster_hosts.host) < 2:\n msg = \"Min two hosts needed in cluster for Vmotion\"\n raise testtools.TestCase.skipException(msg)\n for host in cluster_hosts.host:\n if host.name != vm_host_ip:\n dest_host = host\n # Live Migration\n task = self._migrate_vm(content, server_id, dest_host)\n self._wait_for_task(task, content)\n # Update security group rule for the existing security group\n self.security_group_rules_client.create_security_group_rule(\n security_group_id=group_create_body_update['security_group']['id'],\n protocol='icmp',\n direction='ingress',\n ethertype=self.ethertype\n )\n self.assertTrue(self.ping_ip_address(\n floating_ip['floatingip']['floating_ip_address'],\n should_succeed=True))", "def _post_task_update_advertise_address():\n default_network_interface = None\n\n with open(KUBE_APISERVER_CONFIG) as f:\n lines = f.read()\n m = re.search(REGEXPR_ADVERTISE_ADDRESS, lines)\n if m:\n default_network_interface = m.group(1)\n LOG.debug(' default_network_interface = %s', default_network_interface)\n\n if advertise_address and default_network_interface \\\n and advertise_address != default_network_interface:\n cmd = [\"sed\", \"-i\", \"/oidc-issuer-url/! s/{}/{}/g\".format(default_network_interface, advertise_address),\n KUBE_APISERVER_CONFIG]\n _ = _exec_cmd(cmd)", "def _requires_inmigrate_from(self):\n existing = locate_live_service(self.consul, \"qemu-\" + self.name)\n\n if existing and existing[\"Address\"] != self.this_host:\n # Consul knows about a running VM. 
Lets try a migration.\n return existing[\"Address\"]\n\n if self.ceph.is_unlocked():\n # Consul doesn't know about a running VM and no volume is locked.\n # It doesn't make sense to live migrate this VM.\n return None\n\n if self.ceph.locked_by_me():\n # Consul doesn't know about a running VM and the volume is\n # locked by me, so it doesn't make sense to live migrate the VM.\n return None\n\n # The VM seems to be locked somewhere else, try to migrate it from\n # there.\n return self.ceph.locked_by()", "def nfvi_live_migrate_instance(instance_uuid, callback, to_host_name=None,\n block_storage_migration='auto', context=None):\n if context is None:\n cmd_id = _compute_plugin.invoke_plugin_expediate(\n 'live_migrate_instance', instance_uuid, to_host_name,\n block_storage_migration, context, callback=callback)\n else:\n cmd_id = _compute_plugin.invoke_plugin(\n 'live_migrate_instance', instance_uuid, to_host_name,\n block_storage_migration, context, callback=callback)\n return cmd_id", "def test_vm_migration(self):\n self.check_vm_host_after_migration(positive=False)", "def test_vm_migration(self):\n self.check_vm_host_after_migration(positive=False)", "def OSSupportsIPv4(self) -> bool:", "def cleanUp(name):\n clovr = pymongo.Connection().clovr\n clovr.clusters.remove(dict(name=name))", "def prepare_replica_for_exchange(self, replica):\n pass", "def test_deploy_instance_with_network_and_associate_public_ip(self):\n\n # skip test if suite couldn't start from an empty, clean list of allocated IPs (to avoid cascading failures)\n if self.suite_world['allocated_ips']:\n self.skipTest(\"There were pre-existing, not deallocated IPs\")\n\n # Allocate IP\n allocated_ip = self.__allocate_ip_test_helper__()\n\n # Create Router with an external network gateway\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n router_name = TEST_ROUTER_PREFIX + \"_public_ip_\" + suffix\n external_network_id = self.__get_external_network_test_helper__()\n router_id = self.__create_router_test_helper__(router_name, external_network_id)\n\n # Create Network\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 247\n network_id, subnet_id = self.__create_network_and_subnet_test_helper__(network_name, network_cidr)\n\n # Add interface to router\n port_id = self.neutron_operations.add_interface_router(router_id, subnet_id)\n self.test_world['ports'].append(port_id)\n\n # Deploy VM (it will have only one IP from the Public Pool)\n instance_name = TEST_SERVER_PREFIX + \"_public_ip_\" + suffix\n server_id = self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name, is_network_new=False)\n\n # Associate Public IP to Server\n self.nova_operations.add_floating_ip_to_instance(server_id=server_id, ip_address=allocated_ip)", "def test_replace_host_subnet(self):\n pass", "def prepare_maintenance(self, errors):\n self.cmd_line_opts = self.client['admin'].command('getCmdLineOpts')\n\n if not self.can_restart:\n return\n port = 27017\n specified = False\n repl_index = None\n new_cmd_line = self.cmd_line_opts['argv'][:]\n for i in range(len(new_cmd_line)):\n if new_cmd_line[i] == '--port':\n logging.info(str(new_cmd_line))\n self.maintenance_port = int(new_cmd_line[i+1]) + 20000\n new_cmd_line[i+1] = str(self.maintenance_port)\n specified = True\n if new_cmd_line[i] == '--replSet':\n repl_index = i\n if not specified:\n new_cmd_line.append('--port')\n new_cmd_line.append('47017')\n if repl_index is not None:\n del new_cmd_line[repl_index+1]\n del 
new_cmd_line[repl_index]\n try:\n self._shutdown()\n except Exception as e:\n errors.put(e)\n traceback.print_exc()\n return\n run(\" \".join(new_cmd_line))\n self.client = pymongo.MongoClient(self.host, self.maintenance_port)", "def database_connectivity():\n\n port = \"mongodb://localhost:27017/\"\n client = pymongo.MongoClient(host=port)\n\n db = client[\"Password_Manager\"]\n collection = db[\"Passwords\"]\n\n return collection", "def open_db_connection():\n client = MongoClient() #'104.131.185.191', 27017\n db = client[\"225VOH\"]\n return client, db", "def migrate_volume(self, ctxt, volume, host):\n LOG.debug('Enter: migrate_volume: id=%(id)s, host=%(host)s',\n {'id': volume['id'], 'host': host})\n\n false_ret = (False, None)\n\n if volume['status'] not in ('available', 'retyping'):\n LOG.warning(\"Volume status must be 'available' or 'retyping'.\"\n \" Current volume status: %s\", volume['status'])\n return false_ret\n\n if 'capabilities' not in host:\n LOG.warning(\"Unsupported host. No capabilities found\")\n return false_ret\n\n capabilities = host['capabilities']\n ns_shares = capabilities['ns_shares']\n dst_parts = capabilities['location_info'].split(':')\n dst_host, dst_volume = dst_parts[1:]\n\n if (capabilities.get('vendor_name') != 'Nexenta' or\n dst_parts[0] != self.__class__.__name__ or\n capabilities['free_capacity_gb'] < volume['size']):\n return false_ret\n\n nms = self.share2nms[volume['provider_location']]\n ssh_bindings = nms.appliance.ssh_list_bindings()\n shares = []\n for bind in ssh_bindings:\n for share in ns_shares:\n if (share.startswith(ssh_bindings[bind][3]) and\n ns_shares[share] >= volume['size']):\n shares.append(share)\n if len(shares) == 0:\n LOG.warning(\"Remote NexentaStor appliance at %s should be \"\n \"SSH-bound.\", share)\n return false_ret\n share = sorted(shares, key=ns_shares.get, reverse=True)[0]\n snapshot = {\n 'volume_name': volume['name'],\n 'volume_id': volume['id'],\n 'name': utils.get_migrate_snapshot_name(volume)\n }\n self.create_snapshot(snapshot)\n location = volume['provider_location']\n src = '%(share)s/%(volume)s@%(snapshot)s' % {\n 'share': location.split(':')[1].split('volumes/')[1],\n 'volume': volume['name'],\n 'snapshot': snapshot['name']\n }\n dst = ':'.join([dst_host, dst_volume.split('/volumes/')[1]])\n try:\n nms.appliance.execute(self._get_zfs_send_recv_cmd(src, dst))\n except utils.NexentaException as exc:\n LOG.warning(\"Cannot send source snapshot %(src)s to \"\n \"destination %(dst)s. 
Reason: %(exc)s\",\n {'src': src, 'dst': dst, 'exc': exc})\n return false_ret\n finally:\n try:\n self.delete_snapshot(snapshot)\n except utils.NexentaException as exc:\n LOG.warning(\"Cannot delete temporary source snapshot \"\n \"%(src)s on NexentaStor Appliance: %(exc)s\",\n {'src': src, 'exc': exc})\n try:\n self.delete_volume(volume)\n except utils.NexentaException as exc:\n LOG.warning(\"Cannot delete source volume %(volume)s on \"\n \"NexentaStor Appliance: %(exc)s\",\n {'volume': volume['name'], 'exc': exc})\n\n dst_nms = self._get_nms_for_url(capabilities['nms_url'])\n dst_snapshot = '%s/%s@%s' % (dst_volume.split('volumes/')[1],\n volume['name'], snapshot['name'])\n try:\n dst_nms.snapshot.destroy(dst_snapshot, '')\n except utils.NexentaException as exc:\n LOG.warning(\"Cannot delete temporary destination snapshot \"\n \"%(dst)s on NexentaStor Appliance: %(exc)s\",\n {'dst': dst_snapshot, 'exc': exc})\n return True, {'provider_location': share}", "def upgrade_to_8():\n\n colls = config.db.collection_names()\n to_be_removed = ['version', 'config', 'static']\n # If we are in a bad state (singletons exists but so do any of the colls in to be removed)\n # remove singletons to try again\n if 'singletons' in colls and set(to_be_removed).intersection(set(colls)):\n config.db.drop_collection('singletons')\n\n if 'singletons' not in config.db.collection_names():\n static = config.db.static.find({})\n if static.count() > 0:\n config.db.singletons.insert_many(static)\n config.db.singletons.insert(config.db.version.find({}))\n\n configs = config.db.config.find({'latest': True},{'latest':0})\n if configs.count() == 1:\n c = configs[0]\n c['_id'] = 'config'\n config.db.singletons.insert_one(c)\n\n for c in to_be_removed:\n if c in config.db.collection_names():\n config.db.drop_collection(c)", "def test_vm_migration(self):\n self.check_vm_host_after_migration(positive=True)", "def test_vm_migration(self):\n self.check_vm_host_after_migration(positive=True)", "def _set_static_ip(name, session, vm_):\n ipv4_cidr = \"\"\n ipv4_gw = \"\"\n if \"ipv4_gw\" in vm_.keys():\n log.debug(\"ipv4_gw is found in keys\")\n ipv4_gw = vm_[\"ipv4_gw\"]\n if \"ipv4_cidr\" in vm_.keys():\n log.debug(\"ipv4_cidr is found in keys\")\n ipv4_cidr = vm_[\"ipv4_cidr\"]\n log.debug(\"attempting to set IP in instance\")\n set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)", "def swap_volume(self, old_connection_info, new_connection_info,\n instance, mountpoint, resize_to):", "def upgrade():\n op.add_column(\n 'share_instances',\n Column('access_rules_status', String(length=255))\n )\n\n connection = op.get_bind()\n share_instances_table = utils.load_table('share_instances', connection)\n instance_access_table = utils.load_table('share_instance_access_map',\n connection)\n\n # NOTE(u_glide): Data migrations shouldn't be performed on live clouds\n # because it will lead to unpredictable behaviour of running operations\n # like migration.\n instances_query = (\n share_instances_table.select()\n .where(share_instances_table.c.status == constants.STATUS_AVAILABLE)\n .where(share_instances_table.c.deleted == 'False')\n )\n\n for instance in connection.execute(instances_query):\n\n access_mappings_query = instance_access_table.select().where(\n instance_access_table.c.share_instance_id == instance['id']\n ).where(instance_access_table.c.deleted == 'False')\n\n status = constants.STATUS_ACTIVE\n\n for access_rule in connection.execute(access_mappings_query):\n\n if (access_rule['state'] == constants.STATUS_DELETING or\n 
access_rule['state'] not in priorities):\n continue\n\n if priorities[access_rule['state']] > priorities[status]:\n status = access_rule['state']\n\n # pylint: disable=no-value-for-parameter\n op.execute(\n share_instances_table.update().where(\n share_instances_table.c.id == instance['id']\n ).values({'access_rules_status': upgrade_data_mapping[status]})\n )\n\n op.drop_column('share_instance_access_map', 'state')", "def disassociate_elastic_ip(ElasticIp=None):\n pass", "def test_migrate_volume(self, volume, volumes_steps):\n old_host, _ = volumes_steps.migrate_volume(volume.name)\n volumes_steps.migrate_volume(volume.name, old_host)", "def upgrade():\n\n conn = op.get_bind()\n invalid_acr = get_invalid_acrs(conn, models_names)\n\n if invalid_acr:\n invalid_acr_ids = [x.id for x in invalid_acr]\n add_to_objects_without_revisions_bulk(conn,\n invalid_acr_ids,\n acr,\n \"deleted\")\n delete_invalid_acr(conn, models_names)", "def init_nova_network_ips(instance, server):\n ctx = context.ctx()\n\n management_ip = instance.management_ip\n internal_ip = instance.internal_ip\n\n for network_label in server.networks:\n nova_network = nova.client().networks.find(label=network_label)\n network = netaddr.IPNetwork(nova_network.cidr)\n for ip in server.networks[network_label]:\n if netaddr.IPAddress(ip) in network:\n internal_ip = instance.internal_ip or ip\n else:\n management_ip = instance.management_ip or ip\n\n if not CONF.use_floating_ips:\n management_ip = internal_ip\n\n conductor.instance_update(ctx, instance, {\"management_ip\": management_ip,\n \"internal_ip\": internal_ip})\n\n return internal_ip and management_ip", "def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False):\n return", "def clone_collection(self, src_mongodb_uri, src_database, src_collection):\n # drop \"mongodb://\" suffix from uri\n src_conn = src_mongodb_uri[10:]\n if src_conn[-1] == \"/\":\n src_conn = src_conn[:-1]\n self.client.admin.command(\n {\"cloneCollection\": src_database + \".\" + src_collection, \"from\": src_conn}\n )", "def post_instance_ip_create(self, resource_dict):\n pass", "def test_add_autoassigned_ipv4(self):\n with DockerHost('host', dind=False) as host:\n # Test that auto-assiging IPv4 addresses gives what we expect\n workloads = self._setup_env(host, count=2, ip=\"ipv4\")\n\n workloads[0].assert_can_ping(\"192.168.0.1\", retries=3)\n workloads[1].assert_can_ping(\"192.168.0.0\", retries=3)\n\n host.calicoctl(\"container remove {0}\".format(\"workload0\"))\n host.calicoctl(\"container remove {0}\".format(\"workload1\"))\n\n host.remove_workloads()\n\n # Test that recreating returns the next two IPs (IPs are not\n # reassigned automatically unless we have run out of IPs).\n workloads = self._setup_env(host, count=2, ip=\"ipv4\")\n\n workloads[0].assert_can_ping(\"192.168.0.3\", retries=3)\n workloads[1].assert_can_ping(\"192.168.0.2\", retries=3)", "def zone_map(zoneadm, passthru='__all__'):\n\n ret = {}\n\n for z in zoneadm:\n chunks = z.split(':')\n\n if len(chunks) < 8:\n raise NotImplementedError(\n 'cannot parse zoneadm output: %d fields in %s' %\n (len(chunks), zoneadm))\n\n if chunks[0] == '-':\n continue\n\n if passthru == '__all__' or chunks[1] in passthru:\n ret[chunks[0]] = chunks[1]\n\n \"\"\"\n Here's a cheat: if we're in an NGZ, we don't actually care about\n the zone ID. In fact, kstat `link` instances *don't* match to\n zone ID in NGZs. 
So, we fudge the key.\n \"\"\"\n\n if len(zoneadm) == 1 and ret.keys()[0] != 'global':\n ret = {'0': ret.values()[0]}\n\n return ret", "def migrateLXCContainer(self,node,vmid,target):\n post_data = {'target': str(target)}\n data = self.connect('post','nodes/%s/lxc/%s/migrate' % (node,vmid), post_data)\n return data", "def migrate(env, dry_run=False):\n registry = env['registry']\n settings = registry.settings\n readonly_backends = ('storage', 'permission')\n readonly_mode = asbool(settings.get('readonly', False))\n\n for backend in ('cache', 'storage', 'permission'):\n if hasattr(registry, backend):\n if readonly_mode and backend in readonly_backends:\n message = ('Cannot migrate the %s backend while '\n 'in readonly mode.' % backend)\n logger.error(message)\n else:\n getattr(registry, backend).initialize_schema(dry_run=dry_run)", "def save_list_mongo(listz):\t\n\tconnection = pymongo.Connection('localhost', 27017)\n\tdb = connection.database\n\tcollection = db.warez_collection", "def testMigrateAnInstanceInAnInstanceGroup(self):\n ### create test resources\n instance_name_1 = 'end-to-end-test-instance-1'\n instance_selfLink_1 = \\\n self.test_resource_creator.create_instance_using_template(\n instance_name_1,\n self.test_resource_creator.legacy_instance_template_selfLink)[\n 'targetLink']\n original_config = self.google_api_interface.get_instance_configs(\n instance_name_1)\n unmanaged_instance_group_name = 'end-to-end-test-unmanaged-instance-group-1'\n self.test_resource_creator.create_unmanaged_instance_group(\n unmanaged_instance_group_name,\n [instance_name_1])\n ### start migration\n selfLink_executor = SelfLinkExecutor(self.compute,\n instance_selfLink_1,\n self.test_resource_creator.network_name,\n self.test_resource_creator.subnetwork_name,\n False)\n\n with self.assertRaises(AmbiguousTargetResource):\n migration_handler = selfLink_executor.build_migration_handler()\n migration_handler.network_migration()\n ### check migration result\n # the migration never starts, so the config didn't change\n new_config = self.google_api_interface.get_instance_configs(\n instance_name_1)\n self.assertEqual(new_config, original_config)\n print('Pass the current test')", "def mongodb_drop():\n # Load environment variables\n dotenv_path = find_dotenv()\n load_dotenv(dotenv_path)\n # Connect to the db\n # DB will be created if it doesn't already exist\n client = pymongo.MongoClient(os.environ.get(\"DATABASE_URL\"), 27017)\n client = client.drop_database(\"tweetbase\")", "def nocleanup():\n _lbs = kivaloo.servers.Server_lbs()\n _kvlds = kivaloo.servers.Server_kvlds()", "def __init__(self, db_name='leaderboard'):\n key = os.getenv('ATLAS_KEY')\n self.valid = key is not None\n self.client = None\n self.database = None\n if self.valid:\n try:\n self.client = pymongo.MongoClient(key % db_name)\n self.database = self.client[db_name]\n except pymongo.errors.ConfigurationError:\n self.valid = False", "def default_zone(self, region):\n if region == 'us-east-1':\n return region + 'b'\n else:\n return region + 'a'" ]
[ "0.53361", "0.52414596", "0.5048282", "0.50085294", "0.48300168", "0.47680244", "0.4725034", "0.4668024", "0.46215114", "0.45944652", "0.45926207", "0.45436805", "0.45293182", "0.45082015", "0.45050445", "0.44917303", "0.4491257", "0.44898972", "0.44782624", "0.44595343", "0.44543976", "0.44533044", "0.44204715", "0.44174817", "0.44041285", "0.4401164", "0.43905857", "0.43628395", "0.4361698", "0.4351896", "0.43491897", "0.43161002", "0.43153453", "0.43075272", "0.43022346", "0.42878", "0.42711118", "0.4264831", "0.42577437", "0.42568153", "0.4255329", "0.42533553", "0.4252738", "0.42521662", "0.42487645", "0.42468122", "0.42419448", "0.42317715", "0.42304623", "0.42303053", "0.4225821", "0.4224186", "0.42193252", "0.42066112", "0.42049453", "0.42034575", "0.4201609", "0.42005357", "0.41979682", "0.4195347", "0.41948622", "0.41930196", "0.41566235", "0.415352", "0.41510588", "0.4145324", "0.41433296", "0.41433296", "0.41421765", "0.41411743", "0.41385832", "0.41382912", "0.41278353", "0.41277912", "0.41269913", "0.41256025", "0.4125223", "0.41244018", "0.41237873", "0.41237873", "0.4123678", "0.41235974", "0.4119317", "0.4100076", "0.40999612", "0.4092576", "0.40919498", "0.40904537", "0.40833938", "0.40771604", "0.40764236", "0.40753826", "0.40717652", "0.40699896", "0.4064618", "0.4064607", "0.40644893", "0.4061804", "0.40586606", "0.4057504" ]
0.50002617
4
This operation is available only for replica set instances that run MongoDB 4.2 or earlier and for sharded cluster instances. Before you call this operation, make sure that the following requirements are met:
* If you have applied for a public endpoint for the ApsaraDB for MongoDB instance, you must call the [ReleasePublicNetworkAddress](~~67604~~) operation to release the public endpoint before you call the MigrateAvailableZone operation.
* Transparent data encryption (TDE) is disabled for the ApsaraDB for MongoDB instance.
* The source zone and the destination zone belong to the same region.
* A vSwitch is created in the destination zone. This prerequisite must be met if the instance resides in a virtual private cloud (VPC). For more information about how to create a vSwitch, see [Work with vSwitches](~~65387~~).
def migrate_available_zone(
    self,
    request: dds_20151201_models.MigrateAvailableZoneRequest,
) -> dds_20151201_models.MigrateAvailableZoneResponse:
    runtime = util_models.RuntimeOptions()
    return self.migrate_available_zone_with_options(request, runtime)
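For context, a minimal usage sketch of this wrapper follows. The package layout, client construction, endpoint, and all IDs are assumptions based on the standard Alibaba Cloud generated Python SDK conventions rather than values taken from this document; verify them against the installed SDK and the API reference before use.

```python
# Hypothetical usage sketch of migrate_available_zone.
# Assumptions: standard generated-SDK package names and placeholder credentials/IDs.
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_dds20151201.client import Client

config = open_api_models.Config(
    access_key_id='<your-access-key-id>',        # placeholder credentials
    access_key_secret='<your-access-key-secret>',
    endpoint='<your-endpoint>',                  # e.g. a regional ApsaraDB for MongoDB endpoint
)
client = Client(config)

request = dds_20151201_models.MigrateAvailableZoneRequest(
    dbinstance_id='dds-bp1**********',           # placeholder instance ID
    zone_id='cn-hangzhou-h',                     # placeholder destination zone in the same region
    vswitch='vsw-bp1**********',                 # placeholder vSwitch ID, needed for VPC instances
)
response = client.migrate_available_zone(request)
print(response.body)
```

As the method body shows, `migrate_available_zone` only builds default `RuntimeOptions` and delegates to `migrate_available_zone_with_options`, which assembles and sends the MigrateAvailableZone RPC request.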
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def migrate_available_zone_with_options(\n self,\n request: dds_20151201_models.MigrateAvailableZoneRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.MigrateAvailableZoneResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.effective_time):\n query['EffectiveTime'] = request.effective_time\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.vswitch):\n query['Vswitch'] = request.vswitch\n if not UtilClient.is_unset(request.zone_id):\n query['ZoneId'] = request.zone_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='MigrateAvailableZone',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.MigrateAvailableZoneResponse(),\n self.call_api(params, req, runtime)\n )", "def test_migrate_volume_driver_cross_az(self):\n # Mock driver and rpc functions\n self.mock_object(self.volume.driver, 'migrate_volume',\n lambda x, y, z, new_type_id=None: (\n True, {'user_id': fake.USER_ID}))\n dst_az = 'AZ2'\n db.service_update(self.context, self._service.id,\n {'availability_zone': dst_az})\n\n volume = tests_utils.create_volume(self.context, size=0,\n host=CONF.host,\n migration_status='migrating')\n host_obj = {'host': 'newhost', 'capabilities': {}}\n self.volume.migrate_volume(self.context, volume, host_obj, False)\n\n # check volume properties\n volume.refresh()\n self.assertEqual('newhost', volume.host)\n self.assertEqual('success', volume.migration_status)\n self.assertEqual(dst_az, volume.availability_zone)", "def install_mongo ( vpc_conn, ec2_conn, cloudwatch_conn, vpc, base_name, aws_account_type, base_topicarn, params ):\n app_type = 'MONGO'\n mongo_keypair = get_keypair_name( aws_account_type, vpc.region.name, app_type )\n print mongo_keypair\n mongo_sec_grp_name = get_secgrp_name( base_name, app_type )\n \n print \"mongo_sec_grp_name\" + mongo_sec_grp_name\n \n mongo_sec_grp = find_secgrp(ec2_conn, mongo_sec_grp_name)\n print mongo_sec_grp\n \n if not mongo_sec_grp :\n mongo_sec_grp = create_secgrp( ec2_conn,\n vpc,\n mongo_sec_grp_name,\n 'Controls access to the ' + app_type )\n print mongo_sec_grp \n \n remove_all_rules( ec2_conn, [ mongo_sec_grp ],deep = True,base_name = base_name)\n grant_ssh_access( ec2_conn, [ mongo_sec_grp ], find_group( ec2_conn, base_name, 'NAT' ) )\n grant_grp_self_access ( ec2_conn, mongo_sec_grp, 0, 65535, protocol = 'tcp' )\n \n mongo_ami_name = base_name + '-' + app_type\n \n mongo_ami = get_ami_by_name( ec2_conn, mongo_ami_name )\n if not mongo_ami :\n mongo_ami = get_ami_by_name( ec2_conn, default_mongo_ami )\n\n print \"Creating mongoDB Instances\"\n \n mongo_subnets = get_vpc_subnets( vpc_conn, vpc, 'STATIC' )\n mongo_sec_grps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ mongo_sec_grp_name ] } )\n \n primary_ip = 
get_primary_node(mongo_subnets)\n mongo_instances = []\n mongo_config = get_mongo_config(mongo_subnets)\n \n user_data = None\n primary_instance_id = None\n primary_subnet = None\n for subnet in mongo_subnets :\n cidr_block = subnet.cidr_block\n \n if cidr_block == primary_ip :\n primary_subnet = subnet\n \n # First launch the secondary instances\n if cidr_block != primary_ip :\n instance_private_ip = get_static_ip(subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo )\n\n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )\n\n user_data = base64.b64encode(mongo_config)\n \n print \"Launching primary mongo instance\"\n print \"primary_subnet \" +primary_subnet.cidr_block\n instance_private_ip = get_static_ip(primary_subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = primary_subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo)\n \n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )", "async def migrate_available_zone_with_options_async(\n self,\n request: dds_20151201_models.MigrateAvailableZoneRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.MigrateAvailableZoneResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.effective_time):\n query['EffectiveTime'] = request.effective_time\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.vswitch):\n query['Vswitch'] = request.vswitch\n if not UtilClient.is_unset(request.zone_id):\n query['ZoneId'] = request.zone_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='MigrateAvailableZone',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.MigrateAvailableZoneResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def create_mongodb(config):\n\n \n mongo_url = \"mongodb://\"\n mongo_url += \",\".join(map(lambda srv: srv['host'] + \":\" + str(srv['port']), 
config['data']['mongoServers']))\n \n if 'replica' in config['data']:\n mongo_url += \"/?replicaSet={0}\".format(config['data']['replica'])\n\n client = MongoClient(mongo_url)\n\n return client", "async def migrate_available_zone_async(\n self,\n request: dds_20151201_models.MigrateAvailableZoneRequest,\n ) -> dds_20151201_models.MigrateAvailableZoneResponse:\n runtime = util_models.RuntimeOptions()\n return await self.migrate_available_zone_with_options_async(request, runtime)", "def change_zone_ip(config, section, new_ip):\n\n a_name = config.get(section, \"a_name\")\n apikey = config.get(section, \"apikey\")\n ttl = int(config.get(section, \"ttl\"))\n zone_id = get_zone_id(config, section)\n\n zone_record = {'name': a_name, 'value': new_ip, 'ttl': ttl, 'type': 'A'}\n\n new_zone_ver = api.domain.zone.version.new(apikey, zone_id)\n\n # clear old A record (defaults to previous verison's\n api.domain.zone.record.delete(apikey, zone_id, new_zone_ver,\n {'type': 'A', 'name': a_name})\n\n # Add in new A record\n api.domain.zone.record.add(apikey, zone_id, new_zone_ver, zone_record)\n\n # Set new zone version as the active zone\n api.domain.zone.version.set(apikey, zone_id, new_zone_ver)", "def configure_cluster(ctx, zone, db_instance_name):\n ctx.run(init_pg_servers_play_run(zone, db_instance_name), pty=True, echo=True)", "def test_mongodb_destination(sdc_builder, sdc_executor, mongodb):\n pipeline_builder = sdc_builder.get_pipeline_builder()\n pipeline_builder.add_error_stage('Discard')\n\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='TEXT', raw_data='\\n'.join(DATA))\n\n expression_evaluator = pipeline_builder.add_stage('Expression Evaluator')\n # MongoDB destination uses the CRUD operation in the sdc.operation.type record header attribute when writing\n # to MongoDB. 
Value 4 specified below is for UPSERT.\n expression_evaluator.header_attribute_expressions = [{'attributeToSet': 'sdc.operation.type',\n 'headerAttributeExpression': '1'}]\n\n mongodb_dest = pipeline_builder.add_stage('MongoDB', type='destination')\n mongodb_dest.set_attributes(database=get_random_string(ascii_letters, 5),\n collection=get_random_string(ascii_letters, 10))\n # From 3.6.0, unique key field is a list, otherwise single string for older version.\n mongodb_dest.unique_key_field = ['/text'] if Version(sdc_builder.version) >= Version('3.6.0') else '/text'\n\n record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')\n trash = pipeline_builder.add_stage('Trash')\n dev_raw_data_source >> record_deduplicator >> expression_evaluator >> mongodb_dest\n record_deduplicator >> trash\n pipeline = pipeline_builder.build().configure_for_environment(mongodb)\n\n try:\n # Data is generated in dev_raw_data_source and sent to MongoDB using pipeline.\n sdc_executor.add_pipeline(pipeline)\n sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(DATA))\n sdc_executor.stop_pipeline(pipeline)\n\n # Verify data is received correctly using PyMongo.\n # Similar to writing, while reading data, we specify MongoDB database and the collection inside it.\n logger.info('Verifying docs received with PyMongo...')\n assert [item['text'] for item in mongodb.engine[mongodb_dest.database][mongodb_dest.collection].find()] == DATA\n\n finally:\n logger.info('Dropping %s database...', mongodb_dest.database)\n mongodb.engine.drop_database(mongodb_dest.database)", "def test_mongodb_origin_simple(sdc_builder, sdc_executor, mongodb):\n pipeline_builder = sdc_builder.get_pipeline_builder()\n pipeline_builder.add_error_stage('Discard')\n\n mongodb_origin = pipeline_builder.add_stage('MongoDB', type='origin')\n mongodb_origin.set_attributes(capped_collection=False,\n database=get_random_string(ascii_letters, 5),\n collection=get_random_string(ascii_letters, 10))\n\n trash = pipeline_builder.add_stage('Trash')\n mongodb_origin >> trash\n pipeline = pipeline_builder.build().configure_for_environment(mongodb)\n\n try:\n # MongoDB and PyMongo add '_id' to the dictionary entries e.g. docs_in_database\n # when used for inserting in collection. Hence the deep copy.\n docs_in_database = copy.deepcopy(ORIG_DOCS)\n\n # Create documents in MongoDB using PyMongo.\n # First a database is created. 
Then a collection is created inside that database.\n # Then documents are created in that collection.\n logger.info('Adding documents into %s collection using PyMongo...', mongodb_origin.collection)\n mongodb_database = mongodb.engine[mongodb_origin.database]\n mongodb_collection = mongodb_database[mongodb_origin.collection]\n insert_list = [mongodb_collection.insert_one(doc) for doc in docs_in_database]\n assert len(insert_list) == len(docs_in_database)\n\n # Start pipeline and verify the documents using snaphot.\n sdc_executor.add_pipeline(pipeline)\n snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot\n sdc_executor.stop_pipeline(pipeline)\n rows_from_snapshot = [{record.value['value']['name']['sqpath'].lstrip('/'):\n record.value['value']['name']['value']}\n for record in snapshot[mongodb_origin].output]\n\n assert rows_from_snapshot == ORIG_DOCS\n\n finally:\n logger.info('Dropping %s database...', mongodb_origin.database)\n mongodb.engine.drop_database(mongodb_origin.database)", "def switch_availability_zone():\n global current_az\n if current_az == 0:\n current_az = 1\n else:\n current_az = 0", "def migrate_contract(network):\n print(network)", "def bind(self,cluster_name,ip_address='',bind_details={},project_id=''):\n project_id = project_id if project_id != '' else self.__project_id\n if ip_address == '':\n headers = { 'User-Agent': 'curl/7.61.0'} # spoof for simple response\n ip = requests.get('http://ifconfig.co', headers)\n ip_address = ip.text.rstrip()\n logger.info(f'bind: looked up ip address: {ip_address}')\n #key = self.create_programatic_apikey(description=description,project_id=project_id)\n db_user = { 'username' : 'foo'\n ,'password' : 'changeme'\n ,'databaseName' : 'admin'\n ,'roles' : [ {'databaseName' : 'admin', 'roleName' : 'dbAdminAnyDatabase'} ] \n }\n user = self.create_database_user(db_user,project_id=project_id) \n cluster = self.get_cluster(cluster_name)\n cs = cluster['mongoURIWithOptions'].split('/',1)\n #conn_str = f'{cs[0]//{key['publicKey']}:{key['privateKey']}@{cs[1]}'\n return conn_str", "def test_mongodb_origin_simple_with_BSONBinary(sdc_builder, sdc_executor, mongodb):\n\n ORIG_BINARY_DOCS = [\n {'data': binary.Binary(b'Binary Data Flute')},\n {'data': binary.Binary(b'Binary Data Oboe')},\n {'data': binary.Binary(b'Binary Data Violin')}\n ]\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n pipeline_builder.add_error_stage('Discard')\n\n mongodb_origin = pipeline_builder.add_stage('MongoDB', type='origin')\n mongodb_origin.set_attributes(capped_collection=False,\n database=get_random_string(ascii_letters, 5),\n collection=get_random_string(ascii_letters, 10))\n\n trash = pipeline_builder.add_stage('Trash')\n mongodb_origin >> trash\n pipeline = pipeline_builder.build().configure_for_environment(mongodb)\n\n try:\n # MongoDB and PyMongo add '_id' to the dictionary entries e.g. docs_in_database\n # when used for inserting in collection. Hence the deep copy.\n docs_in_database = copy.deepcopy(ORIG_BINARY_DOCS)\n\n # Create documents in MongoDB using PyMongo.\n # First a database is created. 
Then a collection is created inside that database.\n # Then documents are created in that collection.\n logger.info('Adding documents into %s collection using PyMongo...', mongodb_origin.collection)\n mongodb_database = mongodb.engine[mongodb_origin.database]\n mongodb_collection = mongodb_database[mongodb_origin.collection]\n insert_list = [mongodb_collection.insert_one(doc) for doc in docs_in_database]\n assert len(insert_list) == len(docs_in_database)\n\n # Start pipeline and verify the documents using snaphot.\n sdc_executor.add_pipeline(pipeline)\n snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot\n sdc_executor.stop_pipeline(pipeline)\n rows_from_snapshot = [{'data': str(record.value2['data'])} for record in snapshot[mongodb_origin].output]\n\n assert rows_from_snapshot == [{'data': str(record.get('data'))} for record in ORIG_BINARY_DOCS]\n\n finally:\n logger.info('Dropping %s database...', mongodb_origin.database)\n mongodb.engine.drop_database(mongodb_origin.database)", "def test_12_migrate_vm_live_with_snapshots_on_remote(self):\n global vm2\n # Get ROOT Volume\n vol_for_snap = list_volumes(\n self.apiclient,\n virtualmachineid=vm2.id,\n listall=True)\n for vol in vol_for_snap:\n snapshot = Snapshot.create(\n self.apiclient,\n volume_id=vol.id\n )\n snapshot.validateState(\n self.apiclient,\n snapshotstate=\"backedup\",\n )\n # Migrate all volumes and VMs\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm2 = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_2, destinationHost)\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)", "def migrate_database():\n log('Migrating the keystone database.', level=INFO)\n service_stop(keystone_service())\n # NOTE(jamespage) > icehouse creates a log file as root so use\n # sudo to execute as keystone otherwise keystone won't start\n # afterwards.\n cmd = ['sudo', '-u', 'keystone', 'keystone-manage', 'db_sync']\n subprocess.check_output(cmd)\n service_start(keystone_service())\n time.sleep(10)\n peer_store('db-initialised', 'True')", "def connect_and_update(_id, padding, host, port, dbname, collname,\n updates_per_process, process_number, replica_set):\n client = MongoClient(host=[get_hostport_string(host=host, port=port)],\n replicaset=replica_set)\n db = client[dbname]\n collection = db[collname]\n try: # Unless using multiple docs, most of these will fail\n collection.insert_one({\"_id\": _id, \"padding\": padding})\n except:\n pass\n\n for j in xrange(updates_per_process):\n update_document(_id, collection, padding, process_number)\n \n client.close()", "def migrate_to_other_zone(\n self,\n request: dds_20151201_models.MigrateToOtherZoneRequest,\n ) -> dds_20151201_models.MigrateToOtherZoneResponse:\n runtime = util_models.RuntimeOptions()\n return self.migrate_to_other_zone_with_options(request, runtime)", "def test_transform_and_load_vpcs(neo4j_session):\n vpc_res = tests.data.gcp.compute.VPC_RESPONSE\n vpc_list = cartography.intel.gcp.compute.transform_gcp_vpcs(vpc_res)\n cartography.intel.gcp.compute.load_gcp_vpcs(neo4j_session, vpc_list, TEST_UPDATE_TAG)\n\n query = \"\"\"\n MATCH(vpc:GCPVpc{id:$VpcId})\n RETURN vpc.id, vpc.partial_uri, vpc.auto_create_subnetworks\n \"\"\"\n expected_vpc_id = 
'projects/project-abc/global/networks/default'\n nodes = neo4j_session.run(\n query,\n VpcId=expected_vpc_id,\n )\n actual_nodes = {(n['vpc.id'], n['vpc.partial_uri'], n['vpc.auto_create_subnetworks']) for n in nodes}\n expected_nodes = {\n (expected_vpc_id, expected_vpc_id, True),\n }\n assert actual_nodes == expected_nodes", "def to_network_v4(zone: Zone) -> ipaddress.IPv4Network:\n\n labels = zone.name.split(\".\")[:-3]\n netmask: int = 8 * len(labels)\n offset = 4 - len(labels)\n\n pattern = r\"^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)([/-](2[5-9]|3[0-1]))?$\"\n last_label_parsed = re.search(pattern, labels[0])\n if not last_label_parsed:\n raise ValueError(\"Faild to parse the zone name\")\n\n if last_label_parsed[2]:\n # non-octet boundary delegation detected\n # remove netmask and save it to the result\n last_octect = last_label_parsed[1]\n labels[0] = last_octect\n netmask = int(last_label_parsed[2][1:])\n\n labels = [\"0\"] * offset + labels\n prefix_str = \".\".join(reversed(labels))\n prefix_str += f\"/{netmask}\"\n\n return ipaddress.IPv4Network(prefix_str, strict=True)", "def _resolve_shard(client):\n status = client.admin.command('serverStatus')\n if status['process'] == 'mongos':\n raise RuntimeError(\"Destination cannot be mongos\")\n return client", "def migrateVirtualMachine(self,node,vmid,target,online=False,force=False):\n post_data = {'target': str(target)}\n if online:\n post_data['online'] = '1'\n if force:\n post_data['force'] = '1'\n data = self.connect('post',\"nodes/%s/qemu/%s/migrate\" % (node,vmid), post_data)\n return data", "def setup_source_db(self):\n conn = MongoReplicaSetClient(host=self._source_host,\n replicaSet=self._replica_set,\n read_preference=ReadPreference.PRIMARY)\n conn['admin'].authenticate(self._user, self._password)\n return conn", "def change_address(\n vm_hostname, new_address,\n offline=False, migrate=False, allow_reserved_hv=False,\n offline_transport='drbd',\n):\n\n if not offline:\n raise IGVMError('IP address change can be only performed offline')\n\n with _get_vm(vm_hostname) as vm:\n if vm.dataset_obj['datacenter_type'] != 'kvm.dct':\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n new_address = ip_address(new_address)\n\n if vm.dataset_obj['intern_ip'] == new_address:\n raise ConfigError('New IP address is the same as the old one!')\n\n if not vm.hypervisor.get_vlan_network(new_address) and not migrate:\n err = 'Current hypervisor does not support new subnet!'\n raise ConfigError(err)\n\n new_network = Query(\n {\n 'servertype': 'route_network',\n 'state': 'online',\n 'network_type': 'internal',\n 'intern_ip': Contains(new_address),\n }\n ).get()['hostname']\n\n vm_was_running = vm.is_running()\n\n with Transaction() as transaction:\n if vm_was_running:\n vm.shutdown(\n transaction=transaction,\n check_vm_up_on_transaction=False,\n )\n vm.change_address(\n new_address, new_network, transaction=transaction,\n )\n\n if migrate:\n vm_migrate(\n vm_object=vm,\n run_puppet=True,\n offline=True,\n no_shutdown=True,\n allow_reserved_hv=allow_reserved_hv,\n offline_transport=offline_transport,\n )\n else:\n vm.hypervisor.mount_vm_storage(vm, transaction=transaction)\n vm.run_puppet()\n vm.hypervisor.redefine_vm(vm)\n vm.hypervisor.umount_vm_storage(vm)\n\n if vm_was_running:\n vm.start()", "def migrate_replica(replica, location, noRemove=False, mirror=False):\n\n from tardis.tardis_portal.models import Replica, Location\n\n with transaction.commit_on_success():\n 
replica = Replica.objects.select_for_update().get(pk=replica.pk)\n source = Location.get_location(replica.location.name)\n\n if not replica.verified or location.provider.trust_length:\n raise MigrationError('Only verified datafiles can be migrated' \\\n ' to this destination')\n\n filename = replica.get_absolute_filepath()\n try:\n newreplica = Replica.objects.get(datafile=replica.datafile,\n location=location)\n created_replica = False\n # We've most likely mirrored this file previously. But if\n # we are about to delete the source Replica, we need to check\n # that the target Replica still verifies.\n if not mirror and not check_file_transferred(newreplica, location):\n raise MigrationError('Previously mirrored / migrated Replica' \\\n ' no longer verifies locally!')\n except Replica.DoesNotExist:\n newreplica = Replica()\n newreplica.location = location\n newreplica.datafile = replica.datafile\n newreplica.protocol = ''\n newreplica.stay_remote = location != Location.get_default_location()\n newreplica.verified = False\n url = location.provider.generate_url(newreplica)\n\n if newreplica.url == url:\n # We should get here ...\n raise MigrationError('Cannot migrate a replica to its' \\\n ' current location')\n newreplica.url = url\n location.provider.put_file(replica, newreplica)\n verified = False\n try:\n verified = check_file_transferred(newreplica, location)\n except:\n # FIXME - should we always do this?\n location.provider.remove_file(newreplica)\n raise\n\n newreplica.verified = verified\n newreplica.save()\n logger.info('Transferred file %s for replica %s' %\n (filename, replica.id))\n created_replica = True\n\n if mirror:\n return created_replica\n\n # FIXME - do this more reliably ...\n replica.delete()\n if not noRemove:\n source.provider.remove_file(replica)\n logger.info('Removed local file %s for replica %s' %\n (filename, replica.id))\n return True", "def connect_to_mongo(self, host='127.0.0.1', port=27017, instance='local'):\n if instance == 'prod':\n logging.info('connecting to mongo Atlas')\n self.db_client = MongoClient('mongodb+srv://{}:{}@{}/'\n '{}?retryWrites=true&w=majority'.format(self.config['db']['username'],\n self.config['db']['password'],\n self.config['db']['atlas'],\n self.db_name))\n else:\n logging.info('connecting to local Atlas')\n self.db_client = MongoClient(host, port)", "def move_ips_to_interface(apps, schema_editor):\n UserAS = apps.get_model('scionlab', 'UserAS')\n\n for useras in UserAS.objects.iterator():\n # UserASes have a unique host and before the multi-AP feature had a unique interface\n host = useras.hosts.get()\n iface = useras.interfaces.get()\n if not iface.public_ip:\n iface.public_ip = host.public_ip\n iface.bind_ip = host.bind_ip\n iface.save()\n host.public_ip = None\n host.bind_ip = None\n host.save()", "def upgrade_to_2():\n\n def update_file_origins(cont_list, cont_name):\n for container in cont_list:\n updated_files = []\n for file in container.get('files', []):\n origin = file.get('origin')\n if origin is not None:\n if origin.get('name', None) is None:\n file['origin']['name'] = origin['id']\n if origin.get('method', None) is None:\n file['origin']['method'] = ''\n updated_files.append(file)\n\n query = {'_id': container['_id']}\n update = {'$set': {'files': updated_files}}\n result = config.db[cont_name].update_one(query, update)\n\n query = {'$and':[{'files.origin.name': { '$exists': False}}, {'files.origin.id': { '$exists': True}}]}\n\n update_file_origins(config.db.collections.find(query), 'collections')\n 
update_file_origins(config.db.projects.find(query), 'projects')\n update_file_origins(config.db.sessions.find(query), 'sessions')\n update_file_origins(config.db.acquisitions.find(query), 'acquisitions')", "def try_atlas():\n result = {}\n try:\n vcap_services = os.getenv('VCAP_SERVICES')\n services = json.loads(vcap_services)\n for service_name in services.keys():\n print(f'service_name={service_name}')\n if service_name == \"_\":\n continue\n credentials = load_from_vcap_services(service_name)\n result.update(credentials)\n except Exception as err:\n print( f'Error looking for VCAP_SERVICES {err}')\n result['error']=err\n return result\n\n mongo_results = {}\n try:\n db = MongoClient( result['connectionString'] )\n mongo_results[\"MongoClient\"]= f'{db}'\n mongo_results[\"server_info\"]=db.server_info()\n except Exception as err:\n print( f'Error trying connection to Atlas: {err}')\n result['atlas-error']=err\n finally:\n result['mongo']=mongo_results\n\n return result", "def __init__(self, dst_mongodb_uri, dst_database, dst_collection, dry_run):\n self.client = pymongo.MongoClient(dst_mongodb_uri)\n self.dst_mongodb_uri = dst_mongodb_uri\n self.lookup_col = self.client[dst_database][dst_collection]\n self.dry_run = dry_run", "def test_transform_and_load_gcp_instances_and_nics(neo4j_session):\n instance_responses = [tests.data.gcp.compute.GCP_LIST_INSTANCES_RESPONSE]\n instance_list = cartography.intel.gcp.compute.transform_gcp_instances(instance_responses)\n cartography.intel.gcp.compute.load_gcp_instances(neo4j_session, instance_list, TEST_UPDATE_TAG)\n\n instance_id1 = 'projects/project-abc/zones/europe-west2-b/instances/instance-1-test'\n instance_id2 = 'projects/project-abc/zones/europe-west2-b/instances/instance-1'\n\n nic_query = \"\"\"\n MATCH(i:GCPInstance)-[r:NETWORK_INTERFACE]->(nic:GCPNetworkInterface)\n OPTIONAL MATCH (i)-[:TAGGED]->(t:GCPNetworkTag)\n RETURN i.id, i.zone_name, i.project_id, i.hostname, t.value, r.lastupdated, nic.nic_id, nic.private_ip\n \"\"\"\n objects = neo4j_session.run(nic_query)\n actual_nodes = {\n (\n o['i.id'],\n o['i.zone_name'],\n o['i.project_id'],\n o['nic.nic_id'],\n o['nic.private_ip'],\n o['t.value'],\n o['r.lastupdated'],\n ) for o in objects\n }\n\n expected_nodes = {\n (\n instance_id1,\n 'europe-west2-b',\n 'project-abc',\n 'projects/project-abc/zones/europe-west2-b/instances/instance-1-test/networkinterfaces/nic0',\n '10.0.0.3',\n None,\n TEST_UPDATE_TAG,\n ),\n (\n instance_id2,\n 'europe-west2-b',\n 'project-abc',\n 'projects/project-abc/zones/europe-west2-b/instances/instance-1/networkinterfaces/nic0',\n '10.0.0.2',\n 'test',\n TEST_UPDATE_TAG,\n ),\n }\n assert actual_nodes == expected_nodes", "def mongoRestore( self, db, infile ):\n\t\tsys_command = \"mongorestore --db \" + db + \" --host \" + self.host + \" --port \" + str( self.port ) + \" \" + infile \n\t\tos.system(sys_command)", "def test_migrate_volume_generic_cross_az(self, migrate_volume_completion,\n nova_api):\n original_create = objects.Volume.create\n dst_az = 'AZ2'\n db.service_update(self.context, self._service.id,\n {'availability_zone': dst_az})\n\n def my_create(self, *args, **kwargs):\n self.status = 'available'\n original_create(self, *args, **kwargs)\n\n volume = tests_utils.create_volume(self.context, size=1,\n host=CONF.host)\n\n host_obj = {'host': 'newhost', 'capabilities': {}}\n create_vol = self.patch('cinder.objects.Volume.create',\n side_effect=my_create, autospec=True)\n\n with mock.patch.object(self.volume, '_copy_volume_data') as copy_mock:\n 
self.volume._migrate_volume_generic(self.context, volume, host_obj,\n None)\n copy_mock.assert_called_with(self.context, volume, mock.ANY,\n remote='dest')\n migrate_volume_completion.assert_called_with(\n self.context, volume, mock.ANY, error=False)\n\n nova_api.return_value.update_server_volume.assert_not_called()\n\n self.assertEqual(dst_az,\n create_vol.call_args[0][0]['availability_zone'])", "def migrate_to_other_zone_with_options(\n self,\n request: dds_20151201_models.MigrateToOtherZoneRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.MigrateToOtherZoneResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.effective_time):\n query['EffectiveTime'] = request.effective_time\n if not UtilClient.is_unset(request.instance_id):\n query['InstanceId'] = request.instance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.v_switch_id):\n query['VSwitchId'] = request.v_switch_id\n if not UtilClient.is_unset(request.zone_id):\n query['ZoneId'] = request.zone_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='MigrateToOtherZone',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.MigrateToOtherZoneResponse(),\n self.call_api(params, req, runtime)\n )", "def test_vm_migration_across_hosts(self):\n\n # Create security group for the server\n group_create_body_update, _ = self._create_security_group()\n\n # Create server with security group\n name = data_utils.rand_name('server-with-security-group')\n server_id = self._create_server_with_sec_group(\n name, self.network['id'],\n group_create_body_update['security_group']['id'])\n self.assertTrue(self.verify_portgroup(self.network['id'], server_id))\n device_port = self.ports_client.list_ports(device_id=server_id)\n port_id = device_port['ports'][0]['id']\n floating_ip = self._associate_floating_ips(port_id=port_id)\n self.assertTrue(self.ping_ip_address(\n floating_ip['floatingip']['floating_ip_address'],\n should_succeed=False))\n\n # Update security group rule for the existing security group\n self.security_group_rules_client.create_security_group_rule(\n security_group_id=group_create_body_update['security_group']['id'],\n protocol='icmp',\n direction='ingress',\n ethertype=self.ethertype\n )\n self.assertTrue(self.ping_ip_address(\n floating_ip['floatingip']['floating_ip_address'],\n should_succeed=True))\n cluster = cfg.CONF.VCENTER.cluster_in_use\n content = self._create_connection()\n host_dic = self._get_host_name(server_id)\n vm_host = host_dic['host_name']\n vm_host_ip = vm_host.name\n cluster_hosts = self._get_hosts_for_cluster(content, cluster)\n if len(cluster_hosts.host) < 2:\n msg = \"Min two hosts needed in cluster for Vmotion\"\n raise testtools.TestCase.skipException(msg)\n for host in cluster_hosts.host:\n if host.name != vm_host_ip:\n dest_host = host\n # Live Migration\n task = self._migrate_vm(content, server_id, 
dest_host)\n self._wait_for_task(task, content)\n self.assertTrue(self.ping_ip_address(\n floating_ip['floatingip']['floating_ip_address'],\n should_succeed=True))", "def setup_target_db(self):\n conn = MongoClient(host=self._target_host)\n conn['admin'].authenticate(self._user, self._password)\n return conn", "def dvs_remote_ip_prefix(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n self.show_step(2)\n net_1 = os_conn.create_network(network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network are created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network.\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n self.show_step(3)\n self.show_step(4)\n self.show_step(5)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n access_point_1, access_point_ip_1 = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n security_groups=[security_group.name, sg1.name])\n\n access_point_2, access_point_ip_2 = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n security_groups=[security_group.name, sg2.name])\n\n self.show_step(6)\n _sg_rules = os_conn.neutron.list_security_group_rules()\n sg_rules = [sg_rule for sg_rule in _sg_rules['security_group_rules']\n if sg_rule['security_group_id'] in [sg1.id, sg2.id]]\n for rule in sg_rules:\n os_conn.neutron.delete_security_group_rule(rule['id'])\n\n self.show_step(7)\n for rule in [self.icmp, self.tcp]:\n rule[\"security_group_rule\"][\"security_group_id\"] = sg1.id\n rule[\"security_group_rule\"][\"remote_ip_prefix\"] = access_point_ip_1\n rule[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(rule)\n rule[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(rule)\n\n # Get private ip of access_point_2\n private_ip = os_conn.get_nova_instance_ip(access_point_2,\n net_name=net_1['name'])\n\n self.show_step(8)\n self.tcp[\"security_group_rule\"][\"security_group_id\"] = sg2.id\n self.tcp[\"security_group_rule\"][\"remote_ip_prefix\"] = private_ip\n os_conn.neutron.create_security_group_rule(self.tcp)\n self.tcp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n\n self.show_step(9)\n istances_sg1 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[sg1.name])\n\n istances_sg2 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[sg2.name])\n openstack.verify_instance_state(os_conn)\n\n # Get private ips of instances\n ips = {\n 
'SG1': [os_conn.assign_floating_ip(i).ip for i in istances_sg1],\n 'SG2': [os_conn.get_nova_instance_ip(i, net_name=net_1['name'])\n for i in istances_sg2]\n }\n\n self.show_step(10)\n ip_pair = dict.fromkeys(ips['SG1'])\n for key in ip_pair:\n ip_pair[key] = [access_point_ip_1]\n openstack.check_connection_through_host(access_point_ip_1,\n ip_pair,\n timeout=60 * 4)\n\n self.show_step(11)\n ip_pair = dict.fromkeys(ips['SG1'])\n for key in ip_pair:\n ip_pair[key] = [value for value in ips['SG1'] if key != value]\n openstack.check_connection_through_host(\n access_point_ip_1, ip_pair, result_of_command=1)\n\n self.show_step(12)\n self.show_step(13)\n ip_pair = dict.fromkeys(ips['SG2'])\n for key in ip_pair:\n ip_pair[key] = [value for value in ips['SG2'] if key != value]\n openstack.check_connection_through_host(\n access_point_ip_2, ip_pair, result_of_command=1)", "def get_mongo_config ( subnets ) :\n replications = \"\"\n primary_ip = get_primary_node(subnets)\n for subnet in subnets :\n if primary_ip != subnet.cidr_block :\n replication = get_static_ip(subnet.cidr_block, \"0/24\", mongo_ip_block)\n replication = replication.replace(\".\", \"-\")\n replications = replications + \"\\nrs.add(\\\"ip-\"+replication+\":27017\\\");\"\n \n \n return \"\"\"#!/bin/bash -ex\n exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1\n:>/etc/replication.js\necho 'rs.initiate();\"\"\"+replications+\"\"\"\n\n'>>/etc/replication.js\n\"\"\"", "def upgrade_to_10():\n\n def switch_keys(doc, x, y):\n doc[y] = doc[x]\n doc.pop(x, None)\n\n\n jobs = config.db.jobs.find({'destination.container_type': {'$exists': True}})\n\n for job in jobs:\n switch_keys(job, 'algorithm_id', 'name')\n\n for key in job['inputs'].keys():\n inp = job['inputs'][key]\n\n switch_keys(inp, 'container_type', 'type')\n switch_keys(inp, 'container_id', 'id')\n switch_keys(inp, 'filename', 'name')\n\n\n dest = job['destination']\n switch_keys(dest, 'container_type', 'type')\n switch_keys(dest, 'container_id', 'id')\n\n config.db.jobs.update(\n {'_id': job['_id']},\n job\n )", "def mongo_connect(\n password,\n user='RCD2021',\n dbname='myFirstDatabase'\n):\n\n client = pymongo.MongoClient(f\"mongodb+srv://{user}:{password}\\\n@cluster0.z3tac.mongodb.net/{dbname}?retryWrites=true&w=majority\")\n db = client.test\n\n return db", "def convert_container_to_replica(\n self,\n replica_name: str,\n active_container: docker.models.containers.Container,\n passive_container: docker.models.containers.Container) -> list[docker.models.images.Image]:\n new_replica_name = self.sanitize_replica_name(replica_name)\n replica_list = []\n container_list = [\n active_container, passive_container] if passive_container else [active_container]\n\n logger.info(\n f'Creating new replica image with name {new_replica_name}...')\n\n for container in container_list:\n try:\n self.client.images.remove(new_replica_name, force=True)\n except docker.errors.ImageNotFound:\n pass\n\n container_arch = container.name.split('_')[-1]\n\n # commit with arch tag\n replica = container.commit(\n repository=new_replica_name, tag=container_arch)\n replica_list.append(replica)\n\n logger.info(\n f'Replica image {replica.tags[0]} created. 
Cleaning up...')\n self.remove_container(container.name)\n\n for replica in replica_list:\n if replica.attrs.get('Architecture') == LOCAL_ARCHITECTURE:\n local_arch_replica = replica\n local_arch_replica.tag(\n repository=new_replica_name, tag='latest')\n\n # this is done due to how recomitting existing image is not reflected in 'replica_list' var\n actual_replica_list = self.client.images.list(new_replica_name)\n\n return actual_replica_list", "def restore_cluster(ctx, zone, db_instance, from_zone=None, from_db_instance=None, backup_folder=None, target_time=None):\n\n if from_zone == None:\n from_zone = zone\n if from_db_instance == None:\n from_db_instance = db_instance\n if backup_folder == None:\n get_env('AWS_SECRET_ACCESS_KEY', 'to list the backup buckets at AWS S3.')\n get_env('AWS_ACCESS_KEY_ID', 'to list the backup buckets at AWS S3.')\n get_env('AWS_REGION', 'to list the backup buckets at AWS S3.')\n print(\"Available values for --backup-folder :\\n\")\n res = ctx.run(\"aws s3 ls \" + backup_bucket_name(from_zone, from_db_instance), pty=True, hide=\"stdout\")\n for line in res.stdout.splitlines():\n print(re.search(\"PRE ([^ /]+)\", line).group(1))\n else:\n recover_from = \"{}/{}\".format(backup_bucket_name(from_zone, from_db_instance), backup_folder)\n print(\"\"\"\n Starting recovery\n \"\"\")\n more_vars = {'recover_from': recover_from}\n if target_time:\n more_vars['recovery_target_time'] = '\"{}\"'.format(target_time) # need quoting due to space char\n\n ctx.run(init_pg_servers_play_run(zone, db_instance, more_vars=more_vars), pty=True, echo=True)", "def _activate_new_zone(self):\n if ((not hasattr(self, '_current_zone')) or (not self._current_zone)) or ((not hasattr(self, '_new_zone_version_number')) or (not self._new_zone_version_number)):\n raise GandiApiException(\"Can't update record, no cloned zone available.\")\n success = self._api.domain.zone.version.set(self._api_key, self._current_zone['id'], \n self._new_zone_version_number)\n if not success:\n raise GandiApiException('Failed to activate new zone;')\n else:\n logging.info('New zone version activated.')", "def init_mongo_db():\n try:\n app.mongo.cx.server_info()\n app.mongo.db = app.mongo.cx[\"kamistudio\"]\n if \"kami_corpora\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_corpora\")\n app.mongo.db.kami_corpora.create_index(\"id\", unique=True)\n\n if \"kami_models\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_models\")\n app.mongo.db.kami_models.create_index(\"id\", unique=True)\n\n if \"kami_definitions\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_definitions\")\n app.mongo.db.kami_definitions.create_index(\"id\", unique=True)\n\n if \"kami_new_definitions\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_new_definitions\")\n\n except ServerSelectionTimeoutError as e:\n app.mongo.db = None", "def test_deploy_instance_with_new_network(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 252\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr)", "def test_relic():\n mongo_db = pymongo.MongoClient()\n init_db(mongo_db.roguesim_python)\n populate_db(mongo_db.roguesim_python)", "def test_07_migrate_vm_live_with_snapshots(self):\n global vm\n # Get ROOT 
Volume\n vol_for_snap = list_volumes(\n self.apiclient,\n virtualmachineid=vm.id,\n listall=True)\n for vol in vol_for_snap:\n snapshot = Snapshot.create(\n self.apiclient,\n volume_id=vol.id\n )\n snapshot.validateState(\n self.apiclient,\n snapshotstate=\"backedup\",\n )\n # Migrate all volumes and VMs\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm, self.host)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_1, destinationHost)\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm, self.host)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)", "def load_mongo_configuration(ec2_conn,base_name,params ):\n print \"loading mongo configurings\"\n \n ## Allow security from build server to mongodb\n app_type = 'MONGO'\n \n mongo_sec_grp_name = get_secgrp_name( base_name, app_type )\n mongo_sec_grp = find_secgrp(ec2_conn, mongo_sec_grp_name)\n \n try :\n mongo_sec_grp.authorize( ip_protocol = \"tcp\",\n from_port = 27017,\n to_port = 27017,\n cidr_ip = build_server_cidr )\n except :\n print \"rule exists aready\" \n \n mongo_host = params.get( 'host' )\n mongo_port = params.get( 'port' )\n mongo_username = params.get( 'user-name' )\n mongo_password = params.get( 'password' )\n \n db_name = params.get( 'db_name' )\n collection_name = params.get( 'collection_name' )\n \n documents = params.get( 'documents' )\n \n uri = \"\"\n if len( mongo_username ) > 0 :\n uri = \"mongodb://\"+mongo_username+\":\"+mongo_password+\"@\"+mongo_host+\":\"+mongo_port+\"/\"\n else :\n uri = \"mongodb://\"+mongo_host+\":\"+mongo_port+\"/\"\n \n print \"Mongo Connect URL:\" +uri\n \n \n client = MongoClient(uri)\n \n\n db = client[db_name]\n collection = db[collection_name ]\n \n collection.remove()\n \n for document in documents :\n document = json.dumps(document)\n document = loads(document)\n collection.insert(document)\n document['createdTime'] = datetime.datetime.utcnow()\n collection.save(document)\n \n ## At the end revoke the build server rule \n try :\n mongo_sec_grp.revoke( ip_protocol = \"tcp\",\n from_port = 27017,\n to_port = 27017,\n cidr_ip = build_server_cidr)\n \n except :\n print \"exception removing rule\"\n \n print \"configured\"", "def dns_sync(self, args):\r\n dns = DNSManager(self.client)\r\n vsi = VSManager(self.client)\r\n\r\n vs_id = resolve_id(vsi.resolve_ids, args.get('<identifier>'), 'VS')\r\n instance = vsi.get_instance(vs_id)\r\n zone_id = resolve_id(dns.resolve_ids, instance['domain'], name='zone')\r\n\r\n def sync_a_record():\r\n \"\"\" Sync A record \"\"\"\r\n records = dns.get_records(\r\n zone_id,\r\n host=instance['hostname'],\r\n )\r\n\r\n if not records:\r\n # don't have a record, lets add one to the base zone\r\n dns.create_record(\r\n zone['id'],\r\n instance['hostname'],\r\n 'a',\r\n instance['primaryIpAddress'],\r\n ttl=args['--ttl'])\r\n else:\r\n recs = [x for x in records if x['type'].lower() == 'a']\r\n if len(recs) != 1:\r\n raise CLIAbort(\"Aborting A record sync, found %d \"\r\n \"A record exists!\" % len(recs))\r\n rec = recs[0]\r\n rec['data'] = instance['primaryIpAddress']\r\n rec['ttl'] = args['--ttl']\r\n dns.edit_record(rec)\r\n\r\n def sync_ptr_record():\r\n \"\"\" Sync PTR record \"\"\"\r\n host_rec = instance['primaryIpAddress'].split('.')[-1]\r\n ptr_domains = self.client['Virtual_Guest'].\\\r\n getReverseDomainRecords(id=instance['id'])[0]\r\n 
edit_ptr = None\r\n for ptr in ptr_domains['resourceRecords']:\r\n if ptr['host'] == host_rec:\r\n ptr['ttl'] = args['--ttl']\r\n edit_ptr = ptr\r\n break\r\n\r\n if edit_ptr:\r\n edit_ptr['data'] = instance['fullyQualifiedDomainName']\r\n dns.edit_record(edit_ptr)\r\n else:\r\n dns.create_record(\r\n ptr_domains['id'],\r\n host_rec,\r\n 'ptr',\r\n instance['fullyQualifiedDomainName'],\r\n ttl=args['--ttl'])\r\n\r\n if not instance['primaryIpAddress']:\r\n raise CLIAbort('No primary IP address associated with this VS')\r\n\r\n zone = dns.get_zone(zone_id)\r\n\r\n go_for_it = args['--really'] or confirm(\r\n \"Attempt to update DNS records for %s\"\r\n % instance['fullyQualifiedDomainName'])\r\n\r\n if not go_for_it:\r\n raise CLIAbort(\"Aborting DNS sync\")\r\n\r\n both = False\r\n if not args['--ptr'] and not args['-a']:\r\n both = True\r\n\r\n if both or args['-a']:\r\n sync_a_record()\r\n\r\n if both or args['--ptr']:\r\n sync_ptr_record()", "def test_06_migrate_vm_live_attach_disk(self):\n \n global vm\n global data_disk_1\n data_disk_1 = self.helper.create_custom_disk(\n self.apiclient,\n {\"diskname\":\"StorPoolDisk\" },\n zoneid=self.zone.id,\n size = 5,\n miniops = 2000,\n maxiops = 5000,\n account=self.account.name,\n domainid=self.account.domainid,\n diskofferingid=self.disk_offerings.id,\n )\n\n self.debug(\"Created volume with ID: %s\" % data_disk_1.id)\n\n self.virtual_machine_live_migration_1.attach_volume(\n self.apiclient,\n data_disk_1\n )\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm, self.host)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_1, destinationHost)\n\n\n self.virtual_machine_live_migration_1.attach_volume(\n self.apiclient,\n self.volume\n )\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient,vm, self.host)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_1, destinationHost)\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient,vm, self.host)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)", "def igraph2mongo(graph,collection,mode='OUT',overwrite = False):\r\n for i in graph.vs:\r\n if not list(collection.find({'_id':i.index})):\r\n post = {\"_id\": i.index,\r\n \"neighbors_{}\".format(mode):list(set(graph.neighbors(i.index,mode=mode)))}\r\n post_id = collection.insert_one(post).inserted_id\r\n print( \"node \",post_id,\" added\")\r\n elif overwrite == True:\r\n post = {\"_id\": i.index,\r\n \"neighbors_{}\".format(mode):list(set(graph.neighbors(i.index,mode=mode)))}\r\n collection.replace_one({'_id':i.index},post)\r\n print(\"node \",i.index,\" replaced\")\r\n else:\r\n# print(\"THIS object has the _id\",i.index,list(collection.find({'_id':i.index})))\r\n pass\r\n if overwrite == True:\r\n print(collection, \"has been changed\")", "def swapdb(self, *args, **kwargs) -> NoReturn:\n raise RedisClusterException(\"SWAPDB is not supported in cluster mode\")", "def test_13_migrate_vm_live_resize_volume_on_remote(self):\n global vm2\n global data_disk_2\n\n vol = self.helper.resize_volume(apiclient = self.apiclient, volume = data_disk_1, shrinkOk = False, maxiops = 15000)\n\n # Migrate all volumes and VMs\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, 
self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm2 = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_2, destinationHost)\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)", "def create_database():\n\n try:\n client = MongoClient(MONGO_URI,event_listeners=[CommandLogger()])\n db = client.get_database('UNSD')\n\n coll_ebal = db.get_collection('ebal')\n coll_unfcc = db.get_collection('unfcc')\n\n df_ebal = pd.read_csv(EBAL_FILE)\n df_unfcc = pd.read_csv(UNFCC_FILE)\n df_ebal = decoding_codes(df_ebal)\n\n coco_dict = {}\n for i in df_ebal[\"REF_AREA\"].unique():\n # if i not in coco_dict:\n coco_dict[i] = coco.convert(i, to='iso3')\n coco_dict[\"France-Monaco\"] = coco.convert(\"France\", to='iso3')\n coco_dict[\"Italy-San Marino\"] = coco.convert(\"Italy\", to='iso3')\n coco_dict[\"Switzerland-Liechtenstein\"] = coco.convert(\"Switzerland\", to='iso3')\n df_ebal[\"REF_AREA\"] = [coco_dict[i] for i in df_ebal[\"REF_AREA\"]]\n\n data_json_unfcc = json.loads(df_unfcc.to_json(orient='records'))\n data_json_ebal = json.loads(df_ebal.to_json(orient='records'))\n\n\n result = coll_ebal.insert_many(data_json_ebal)\n logger.info('Inserted a total of {} records in EBAL'.format(len(result.inserted_ids)))\n result = coll_unfcc.insert_many(data_json_unfcc)\n logger.info('Inserted a total of {} records in UNFCC'.format(len(result.inserted_ids)))\n\n except pymongo.errors.ConnectionFailure as e:\n logger.error('PyMongo error ConnectionFailure seen: ' + str(e))\n traceback.print_exc(file = sys.stdout)\n\n finally:\n client.close()", "def _regrid_ak_ext_ana_pcp_stage4(supplemental_precip, config_options, wrf_hydro_geo_meta, mpi_config):\n\n # If the expected file is missing, this means we are allowing missing files, simply\n # exit out of this routine as the regridded fields have already been set to NDV.\n if not os.path.exists(supplemental_precip.file_in1):\n return\n\n # Check to see if the regrid complete flag for this\n # output time step is true. This entails the necessary\n # inputs have already been regridded and we can move on.\n if supplemental_precip.regridComplete:\n if mpi_config.rank == 0:\n config_options.statusMsg = \"No StageIV regridding required for this timestep.\"\n err_handler.log_msg(config_options, mpi_config)\n return\n\n # Create a path for a temporary NetCDF files that will\n # be created through the wgrib2 process.\n stage4_tmp_nc = config_options.scratch_dir + \"/STAGEIV_TMP-{}.nc\".format(mkfilename())\n\n lat_var = \"latitude\"\n lon_var = \"longitude\"\n\n if supplemental_precip.fileType != NETCDF:\n # This file shouldn't exist.... 
but if it does (previously failed\n # execution of the program), remove it.....\n if mpi_config.rank == 0:\n if os.path.isfile(stage4_tmp_nc):\n config_options.statusMsg = \"Found old temporary file: \" + stage4_tmp_nc + \" - Removing.....\"\n err_handler.log_warning(config_options, mpi_config)\n try:\n os.remove(stage4_tmp_nc)\n except OSError:\n config_options.errMsg = f\"Unable to remove temporary file: {stage4_tmp_nc}\"\n err_handler.log_critical(config_options, mpi_config)\n err_handler.check_program_status(config_options, mpi_config)\n\n # Create a temporary NetCDF file from the GRIB2 file.\n cmd = f'$WGRIB2 -match \"APCP:surface:0-6 hour acc fcst\" {supplemental_precip.file_in2} -netcdf {stage4_tmp_nc}'\n if mpi_config.rank == 0:\n config_options.statusMsg = f\"WGRIB2 command: {cmd}\"\n err_handler.log_msg(config_options, mpi_config)\n id_tmp = ioMod.open_grib2(supplemental_precip.file_in2, stage4_tmp_nc, cmd,\n config_options, mpi_config, inputVar=None)\n err_handler.check_program_status(config_options, mpi_config)\n else:\n create_link(\"STAGEIV-PCP\", supplemental_precip.file_in2, stage4_tmp_nc, config_options, mpi_config)\n id_tmp = ioMod.open_netcdf_forcing(stage4_tmp_nc, config_options, mpi_config, False, lat_var, lon_var)\n\n # Check to see if we need to calculate regridding weights.\n calc_regrid_flag = check_supp_pcp_regrid_status(id_tmp, supplemental_precip, config_options,\n wrf_hydro_geo_meta, mpi_config)\n err_handler.check_program_status(config_options, mpi_config)\n\n if calc_regrid_flag:\n if mpi_config.rank == 0:\n config_options.statusMsg = \"Calculating STAGE IV regridding weights.\"\n err_handler.log_msg(config_options, mpi_config)\n calculate_supp_pcp_weights(supplemental_precip, id_tmp, stage4_tmp_nc, config_options, mpi_config, lat_var, lon_var)\n err_handler.check_program_status(config_options, mpi_config)\n\n # Regrid the input variables.\n var_tmp = None\n if mpi_config.rank == 0:\n if mpi_config.rank == 0:\n config_options.statusMsg = f\"Regridding STAGE IV '{supplemental_precip.netcdf_var_names[-1]}' Precipitation.\"\n err_handler.log_msg(config_options, mpi_config)\n try:\n var_tmp = id_tmp.variables[supplemental_precip.netcdf_var_names[-1]][0,:,:]\n except (ValueError, KeyError, AttributeError) as err:\n config_options.errMsg = \"Unable to extract precipitation from STAGE IV file: \" + \\\n supplemental_precip.file_in1 + \" (\" + str(err) + \")\"\n err_handler.log_critical(config_options, mpi_config)\n err_handler.check_program_status(config_options, mpi_config)\n\n var_sub_tmp = mpi_config.scatter_array(supplemental_precip, var_tmp, config_options)\n err_handler.check_program_status(config_options, mpi_config)\n\n try:\n supplemental_precip.esmf_field_in.data[:, :] = var_sub_tmp\n except (ValueError, KeyError, AttributeError) as err:\n config_options.errMsg = \"Unable to place STAGE IV precipitation into local ESMF field: \" + str(err)\n err_handler.log_critical(config_options, mpi_config)\n err_handler.check_program_status(config_options, mpi_config)\n\n try:\n supplemental_precip.esmf_field_out = supplemental_precip.regridObj(supplemental_precip.esmf_field_in,\n supplemental_precip.esmf_field_out)\n except ValueError as ve:\n config_options.errMsg = \"Unable to regrid STAGE IV supplemental precipitation: \" + str(ve)\n err_handler.log_critical(config_options, mpi_config)\n err_handler.check_program_status(config_options, mpi_config)\n\n # Set any pixel cells outside the input domain to the global missing value.\n try:\n 
supplemental_precip.esmf_field_out.data[np.where(supplemental_precip.regridded_mask == 0)] = \\\n config_options.globalNdv\n except (ValueError, ArithmeticError) as npe:\n config_options.errMsg = \"Unable to run mask search on STAGE IV supplemental precipitation: \" + str(npe)\n err_handler.log_critical(config_options, mpi_config)\n err_handler.check_program_status(config_options, mpi_config)\n\n supplemental_precip.regridded_precip2[:, :] = supplemental_precip.esmf_field_out.data\n err_handler.check_program_status(config_options, mpi_config)\n\n # Convert the 6-hourly precipitation total to a rate of mm/s\n try:\n ind_valid = np.where(supplemental_precip.regridded_precip2 != config_options.globalNdv)\n supplemental_precip.regridded_precip2[ind_valid] = supplemental_precip.regridded_precip2[ind_valid] / 3600.0\n del ind_valid\n except (ValueError, ArithmeticError, AttributeError, KeyError) as npe:\n config_options.errMsg = \"Unable to run NDV search on STAGE IV supplemental precipitation: \" + str(npe)\n err_handler.log_critical(config_options, mpi_config)\n err_handler.check_program_status(config_options, mpi_config)\n\n # If we are on the first timestep, set the previous regridded field to be\n # the latest as there are no states for time 0.\n if config_options.current_output_step == 1:\n supplemental_precip.regridded_precip1[:, :] = \\\n supplemental_precip.regridded_precip2[:, :]\n err_handler.check_program_status(config_options, mpi_config)\n\n # Close the temporary NetCDF file and remove it.\n if mpi_config.rank == 0:\n try:\n id_tmp.close()\n except OSError:\n config_options.errMsg = \"Unable to close NetCDF file: \" + stage4_tmp_nc\n err_handler.log_critical(config_options, mpi_config)\n try:\n os.remove(stage4_tmp_nc)\n except OSError:\n config_options.errMsg = \"Unable to remove NetCDF file: \" + stage4_tmp_nc\n err_handler.log_critical(config_options, mpi_config)\n err_handler.check_program_status(config_options, mpi_config)", "def test_migrate_on_compute_fail(self):\n server, source_host, target_host = self._create_server()\n\n # Wrap _prep_resize so we can concurrently delete the server.\n original_prep_resize = compute_manager.ComputeManager._prep_resize\n\n def wrap_prep_resize(*args, **kwargs):\n self._delete_server(server)\n return original_prep_resize(*args, **kwargs)\n\n self.stub_out('nova.compute.manager.ComputeManager._prep_resize',\n wrap_prep_resize)\n\n # Now start the cold migration which will fail in the dest compute.\n self.api.post_server_action(server['id'], {'migrate': None})\n # We cannot monitor the migration from the API since it is deleted\n # when the instance is deleted so just wait for the failed instance\n # action event after the allocation revert happens.\n self._wait_for_action_fail_completion(\n server, instance_actions.MIGRATE, 'compute_prep_resize')\n self._assert_no_allocations(server)", "def tear_down_mongo(self):\r\n split_db = self.split_mongo.db\r\n # old_mongo doesn't give a db attr, but all of the dbs are the same\r\n split_db.drop_collection(self.old_mongo.collection)", "def test_update_virtualization_realm(self):\n pass", "def __init__(self, source='10.0.2.32', is_local=False):\n super().__init__(source, is_local)\n self.client = MongoClient(source)", "def _mongodump(self, port, errors):\n ret = run(\"mongodump %s --forceTableScan --host %s:%d -o %s/%s\" % (\"--oplog\" if self.name != \"config\" and self.name != \"mongos\" and not self.can_restart else \"\", self.host, port, self.backup_path, self.name))\n if ret != 0:\n 
errors.put(Exception(\"Error dumping %s server\" % self.name))\n traceback.print_exc()\n return\n\n ret = run(\"cd %s && tar zcvf %s.tar.gz %s && rm -rf %s\" % (self.backup_path, self.name, self.name, self.name))\n if ret != 0:\n errors.put(Exception(\"Error zipping %s server backup\" % self.name))\n traceback.print_exc()", "def ping(context):\n\n ssh_config = aws_infrastructure.tasks.ssh.SSHConfig.load(SSH_CONFIG_PATH)\n documentdb_config = aws_infrastructure.tasks.library.documentdb.DocumentDBConfig.load(DOCUMENTDB_CONFIG_PATH)\n\n with aws_infrastructure.tasks.ssh.SSHClientContextManager(\n ssh_config=ssh_config,\n ) as ssh_client:\n with aws_infrastructure.tasks.ssh.SSHPortForwardContextManager(\n ssh_client=ssh_client,\n remote_host=documentdb_config.endpoint,\n remote_port=documentdb_config.port,\n ) as ssh_port_forward:\n client = MongoClient(\n host=[\n 'localhost'\n ],\n port=ssh_port_forward.local_port,\n connect=True,\n username=documentdb_config.admin_user,\n password=documentdb_config.admin_password,\n tls=True,\n tlsInsecure=True,\n )\n\n print(client.admin.command('ping'))", "def test_11_migrate_vm_live_attach_disk_on_remote(self):\n \n global vm2\n global data_disk_2\n data_disk_2 = self.helper.create_custom_disk(\n self.apiclient,\n {\"diskname\":\"StorPoolDisk\" },\n zoneid=self.zone.id,\n size = 5,\n miniops = 2000,\n maxiops = 5000,\n account=self.account.name,\n domainid=self.account.domainid,\n diskofferingid=self.disk_offerings.id,\n )\n\n self.debug(\"Created volume with ID: %s\" % data_disk_2.id)\n\n self.virtual_machine_live_migration_2.attach_volume(\n self.apiclient,\n data_disk_2\n )\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm2 = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_2, destinationHost)\n\n\n self.virtual_machine_live_migration_2.attach_volume(\n self.apiclient,\n self.volume_2\n )\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm2 = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_2, destinationHost)", "def test_port_update_after_vm_migration(self):\n # Create security group for the server\n group_create_body_update, _ = self._create_security_group()\n\n # Create server with security group\n name = data_utils.rand_name('server-with-security-group')\n server_id = self._create_server_with_sec_group(\n name, self.network['id'],\n group_create_body_update['security_group']['id'])\n self.assertTrue(self.verify_portgroup(self.network['id'], server_id))\n device_port = self.ports_client.list_ports(device_id=server_id)\n port_id = device_port['ports'][0]['id']\n floating_ip = self._associate_floating_ips(port_id=port_id)\n self.assertTrue(self.ping_ip_address(\n floating_ip['floatingip']['floating_ip_address'],\n should_succeed=False))\n cluster = cfg.CONF.VCENTER.cluster_in_use\n content = self._create_connection()\n host_dic = self._get_host_name(server_id)\n vm_host = host_dic['host_name']\n vm_host_ip = vm_host.name\n cluster_hosts = self._get_hosts_for_cluster(content, cluster)\n if len(cluster_hosts.host) < 2:\n msg = \"Min two hosts needed in cluster for Vmotion\"\n raise testtools.TestCase.skipException(msg)\n for host in cluster_hosts.host:\n if host.name != vm_host_ip:\n dest_host = host\n # Live Migration\n 
task = self._migrate_vm(content, server_id, dest_host)\n self._wait_for_task(task, content)\n # Update security group rule for the existing security group\n self.security_group_rules_client.create_security_group_rule(\n security_group_id=group_create_body_update['security_group']['id'],\n protocol='icmp',\n direction='ingress',\n ethertype=self.ethertype\n )\n self.assertTrue(self.ping_ip_address(\n floating_ip['floatingip']['floating_ip_address'],\n should_succeed=True))", "def _requires_inmigrate_from(self):\n existing = locate_live_service(self.consul, \"qemu-\" + self.name)\n\n if existing and existing[\"Address\"] != self.this_host:\n # Consul knows about a running VM. Lets try a migration.\n return existing[\"Address\"]\n\n if self.ceph.is_unlocked():\n # Consul doesn't know about a running VM and no volume is locked.\n # It doesn't make sense to live migrate this VM.\n return None\n\n if self.ceph.locked_by_me():\n # Consul doesn't know about a running VM and the volume is\n # locked by me, so it doesn't make sense to live migrate the VM.\n return None\n\n # The VM seems to be locked somewhere else, try to migrate it from\n # there.\n return self.ceph.locked_by()", "def _post_task_update_advertise_address():\n default_network_interface = None\n\n with open(KUBE_APISERVER_CONFIG) as f:\n lines = f.read()\n m = re.search(REGEXPR_ADVERTISE_ADDRESS, lines)\n if m:\n default_network_interface = m.group(1)\n LOG.debug(' default_network_interface = %s', default_network_interface)\n\n if advertise_address and default_network_interface \\\n and advertise_address != default_network_interface:\n cmd = [\"sed\", \"-i\", \"/oidc-issuer-url/! s/{}/{}/g\".format(default_network_interface, advertise_address),\n KUBE_APISERVER_CONFIG]\n _ = _exec_cmd(cmd)", "def nfvi_live_migrate_instance(instance_uuid, callback, to_host_name=None,\n block_storage_migration='auto', context=None):\n if context is None:\n cmd_id = _compute_plugin.invoke_plugin_expediate(\n 'live_migrate_instance', instance_uuid, to_host_name,\n block_storage_migration, context, callback=callback)\n else:\n cmd_id = _compute_plugin.invoke_plugin(\n 'live_migrate_instance', instance_uuid, to_host_name,\n block_storage_migration, context, callback=callback)\n return cmd_id", "def test_vm_migration(self):\n self.check_vm_host_after_migration(positive=False)", "def test_vm_migration(self):\n self.check_vm_host_after_migration(positive=False)", "def cleanUp(name):\n clovr = pymongo.Connection().clovr\n clovr.clusters.remove(dict(name=name))", "def OSSupportsIPv4(self) -> bool:", "def prepare_replica_for_exchange(self, replica):\n pass", "def test_deploy_instance_with_network_and_associate_public_ip(self):\n\n # skip test if suite couldn't start from an empty, clean list of allocated IPs (to avoid cascading failures)\n if self.suite_world['allocated_ips']:\n self.skipTest(\"There were pre-existing, not deallocated IPs\")\n\n # Allocate IP\n allocated_ip = self.__allocate_ip_test_helper__()\n\n # Create Router with an external network gateway\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n router_name = TEST_ROUTER_PREFIX + \"_public_ip_\" + suffix\n external_network_id = self.__get_external_network_test_helper__()\n router_id = self.__create_router_test_helper__(router_name, external_network_id)\n\n # Create Network\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 247\n network_id, subnet_id = self.__create_network_and_subnet_test_helper__(network_name, network_cidr)\n\n # Add interface to 
router\n port_id = self.neutron_operations.add_interface_router(router_id, subnet_id)\n self.test_world['ports'].append(port_id)\n\n # Deploy VM (it will have only one IP from the Public Pool)\n instance_name = TEST_SERVER_PREFIX + \"_public_ip_\" + suffix\n server_id = self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name, is_network_new=False)\n\n # Associate Public IP to Server\n self.nova_operations.add_floating_ip_to_instance(server_id=server_id, ip_address=allocated_ip)", "def database_connectivity():\n\n port = \"mongodb://localhost:27017/\"\n client = pymongo.MongoClient(host=port)\n\n db = client[\"Password_Manager\"]\n collection = db[\"Passwords\"]\n\n return collection", "def prepare_maintenance(self, errors):\n self.cmd_line_opts = self.client['admin'].command('getCmdLineOpts')\n\n if not self.can_restart:\n return\n port = 27017\n specified = False\n repl_index = None\n new_cmd_line = self.cmd_line_opts['argv'][:]\n for i in range(len(new_cmd_line)):\n if new_cmd_line[i] == '--port':\n logging.info(str(new_cmd_line))\n self.maintenance_port = int(new_cmd_line[i+1]) + 20000\n new_cmd_line[i+1] = str(self.maintenance_port)\n specified = True\n if new_cmd_line[i] == '--replSet':\n repl_index = i\n if not specified:\n new_cmd_line.append('--port')\n new_cmd_line.append('47017')\n if repl_index is not None:\n del new_cmd_line[repl_index+1]\n del new_cmd_line[repl_index]\n try:\n self._shutdown()\n except Exception as e:\n errors.put(e)\n traceback.print_exc()\n return\n run(\" \".join(new_cmd_line))\n self.client = pymongo.MongoClient(self.host, self.maintenance_port)", "def open_db_connection():\n client = MongoClient() #'104.131.185.191', 27017\n db = client[\"225VOH\"]\n return client, db", "def upgrade_to_8():\n\n colls = config.db.collection_names()\n to_be_removed = ['version', 'config', 'static']\n # If we are in a bad state (singletons exists but so do any of the colls in to be removed)\n # remove singletons to try again\n if 'singletons' in colls and set(to_be_removed).intersection(set(colls)):\n config.db.drop_collection('singletons')\n\n if 'singletons' not in config.db.collection_names():\n static = config.db.static.find({})\n if static.count() > 0:\n config.db.singletons.insert_many(static)\n config.db.singletons.insert(config.db.version.find({}))\n\n configs = config.db.config.find({'latest': True},{'latest':0})\n if configs.count() == 1:\n c = configs[0]\n c['_id'] = 'config'\n config.db.singletons.insert_one(c)\n\n for c in to_be_removed:\n if c in config.db.collection_names():\n config.db.drop_collection(c)", "def test_replace_host_subnet(self):\n pass", "def test_vm_migration(self):\n self.check_vm_host_after_migration(positive=True)", "def test_vm_migration(self):\n self.check_vm_host_after_migration(positive=True)", "def migrate_volume(self, ctxt, volume, host):\n LOG.debug('Enter: migrate_volume: id=%(id)s, host=%(host)s',\n {'id': volume['id'], 'host': host})\n\n false_ret = (False, None)\n\n if volume['status'] not in ('available', 'retyping'):\n LOG.warning(\"Volume status must be 'available' or 'retyping'.\"\n \" Current volume status: %s\", volume['status'])\n return false_ret\n\n if 'capabilities' not in host:\n LOG.warning(\"Unsupported host. 
No capabilities found\")\n return false_ret\n\n capabilities = host['capabilities']\n ns_shares = capabilities['ns_shares']\n dst_parts = capabilities['location_info'].split(':')\n dst_host, dst_volume = dst_parts[1:]\n\n if (capabilities.get('vendor_name') != 'Nexenta' or\n dst_parts[0] != self.__class__.__name__ or\n capabilities['free_capacity_gb'] < volume['size']):\n return false_ret\n\n nms = self.share2nms[volume['provider_location']]\n ssh_bindings = nms.appliance.ssh_list_bindings()\n shares = []\n for bind in ssh_bindings:\n for share in ns_shares:\n if (share.startswith(ssh_bindings[bind][3]) and\n ns_shares[share] >= volume['size']):\n shares.append(share)\n if len(shares) == 0:\n LOG.warning(\"Remote NexentaStor appliance at %s should be \"\n \"SSH-bound.\", share)\n return false_ret\n share = sorted(shares, key=ns_shares.get, reverse=True)[0]\n snapshot = {\n 'volume_name': volume['name'],\n 'volume_id': volume['id'],\n 'name': utils.get_migrate_snapshot_name(volume)\n }\n self.create_snapshot(snapshot)\n location = volume['provider_location']\n src = '%(share)s/%(volume)s@%(snapshot)s' % {\n 'share': location.split(':')[1].split('volumes/')[1],\n 'volume': volume['name'],\n 'snapshot': snapshot['name']\n }\n dst = ':'.join([dst_host, dst_volume.split('/volumes/')[1]])\n try:\n nms.appliance.execute(self._get_zfs_send_recv_cmd(src, dst))\n except utils.NexentaException as exc:\n LOG.warning(\"Cannot send source snapshot %(src)s to \"\n \"destination %(dst)s. Reason: %(exc)s\",\n {'src': src, 'dst': dst, 'exc': exc})\n return false_ret\n finally:\n try:\n self.delete_snapshot(snapshot)\n except utils.NexentaException as exc:\n LOG.warning(\"Cannot delete temporary source snapshot \"\n \"%(src)s on NexentaStor Appliance: %(exc)s\",\n {'src': src, 'exc': exc})\n try:\n self.delete_volume(volume)\n except utils.NexentaException as exc:\n LOG.warning(\"Cannot delete source volume %(volume)s on \"\n \"NexentaStor Appliance: %(exc)s\",\n {'volume': volume['name'], 'exc': exc})\n\n dst_nms = self._get_nms_for_url(capabilities['nms_url'])\n dst_snapshot = '%s/%s@%s' % (dst_volume.split('volumes/')[1],\n volume['name'], snapshot['name'])\n try:\n dst_nms.snapshot.destroy(dst_snapshot, '')\n except utils.NexentaException as exc:\n LOG.warning(\"Cannot delete temporary destination snapshot \"\n \"%(dst)s on NexentaStor Appliance: %(exc)s\",\n {'dst': dst_snapshot, 'exc': exc})\n return True, {'provider_location': share}", "def swap_volume(self, old_connection_info, new_connection_info,\n instance, mountpoint, resize_to):", "def _set_static_ip(name, session, vm_):\n ipv4_cidr = \"\"\n ipv4_gw = \"\"\n if \"ipv4_gw\" in vm_.keys():\n log.debug(\"ipv4_gw is found in keys\")\n ipv4_gw = vm_[\"ipv4_gw\"]\n if \"ipv4_cidr\" in vm_.keys():\n log.debug(\"ipv4_cidr is found in keys\")\n ipv4_cidr = vm_[\"ipv4_cidr\"]\n log.debug(\"attempting to set IP in instance\")\n set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)", "def upgrade():\n op.add_column(\n 'share_instances',\n Column('access_rules_status', String(length=255))\n )\n\n connection = op.get_bind()\n share_instances_table = utils.load_table('share_instances', connection)\n instance_access_table = utils.load_table('share_instance_access_map',\n connection)\n\n # NOTE(u_glide): Data migrations shouldn't be performed on live clouds\n # because it will lead to unpredictable behaviour of running operations\n # like migration.\n instances_query = (\n share_instances_table.select()\n .where(share_instances_table.c.status == 
constants.STATUS_AVAILABLE)\n .where(share_instances_table.c.deleted == 'False')\n )\n\n for instance in connection.execute(instances_query):\n\n access_mappings_query = instance_access_table.select().where(\n instance_access_table.c.share_instance_id == instance['id']\n ).where(instance_access_table.c.deleted == 'False')\n\n status = constants.STATUS_ACTIVE\n\n for access_rule in connection.execute(access_mappings_query):\n\n if (access_rule['state'] == constants.STATUS_DELETING or\n access_rule['state'] not in priorities):\n continue\n\n if priorities[access_rule['state']] > priorities[status]:\n status = access_rule['state']\n\n # pylint: disable=no-value-for-parameter\n op.execute(\n share_instances_table.update().where(\n share_instances_table.c.id == instance['id']\n ).values({'access_rules_status': upgrade_data_mapping[status]})\n )\n\n op.drop_column('share_instance_access_map', 'state')", "def test_migrate_volume(self, volume, volumes_steps):\n old_host, _ = volumes_steps.migrate_volume(volume.name)\n volumes_steps.migrate_volume(volume.name, old_host)", "def disassociate_elastic_ip(ElasticIp=None):\n pass", "def upgrade():\n\n conn = op.get_bind()\n invalid_acr = get_invalid_acrs(conn, models_names)\n\n if invalid_acr:\n invalid_acr_ids = [x.id for x in invalid_acr]\n add_to_objects_without_revisions_bulk(conn,\n invalid_acr_ids,\n acr,\n \"deleted\")\n delete_invalid_acr(conn, models_names)", "def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False):\n return", "def init_nova_network_ips(instance, server):\n ctx = context.ctx()\n\n management_ip = instance.management_ip\n internal_ip = instance.internal_ip\n\n for network_label in server.networks:\n nova_network = nova.client().networks.find(label=network_label)\n network = netaddr.IPNetwork(nova_network.cidr)\n for ip in server.networks[network_label]:\n if netaddr.IPAddress(ip) in network:\n internal_ip = instance.internal_ip or ip\n else:\n management_ip = instance.management_ip or ip\n\n if not CONF.use_floating_ips:\n management_ip = internal_ip\n\n conductor.instance_update(ctx, instance, {\"management_ip\": management_ip,\n \"internal_ip\": internal_ip})\n\n return internal_ip and management_ip", "def clone_collection(self, src_mongodb_uri, src_database, src_collection):\n # drop \"mongodb://\" suffix from uri\n src_conn = src_mongodb_uri[10:]\n if src_conn[-1] == \"/\":\n src_conn = src_conn[:-1]\n self.client.admin.command(\n {\"cloneCollection\": src_database + \".\" + src_collection, \"from\": src_conn}\n )", "def post_instance_ip_create(self, resource_dict):\n pass", "def zone_map(zoneadm, passthru='__all__'):\n\n ret = {}\n\n for z in zoneadm:\n chunks = z.split(':')\n\n if len(chunks) < 8:\n raise NotImplementedError(\n 'cannot parse zoneadm output: %d fields in %s' %\n (len(chunks), zoneadm))\n\n if chunks[0] == '-':\n continue\n\n if passthru == '__all__' or chunks[1] in passthru:\n ret[chunks[0]] = chunks[1]\n\n \"\"\"\n Here's a cheat: if we're in an NGZ, we don't actually care about\n the zone ID. In fact, kstat `link` instances *don't* match to\n zone ID in NGZs. 
So, we fudge the key.\n \"\"\"\n\n if len(zoneadm) == 1 and ret.keys()[0] != 'global':\n ret = {'0': ret.values()[0]}\n\n return ret", "def test_add_autoassigned_ipv4(self):\n with DockerHost('host', dind=False) as host:\n # Test that auto-assiging IPv4 addresses gives what we expect\n workloads = self._setup_env(host, count=2, ip=\"ipv4\")\n\n workloads[0].assert_can_ping(\"192.168.0.1\", retries=3)\n workloads[1].assert_can_ping(\"192.168.0.0\", retries=3)\n\n host.calicoctl(\"container remove {0}\".format(\"workload0\"))\n host.calicoctl(\"container remove {0}\".format(\"workload1\"))\n\n host.remove_workloads()\n\n # Test that recreating returns the next two IPs (IPs are not\n # reassigned automatically unless we have run out of IPs).\n workloads = self._setup_env(host, count=2, ip=\"ipv4\")\n\n workloads[0].assert_can_ping(\"192.168.0.3\", retries=3)\n workloads[1].assert_can_ping(\"192.168.0.2\", retries=3)", "def migrateLXCContainer(self,node,vmid,target):\n post_data = {'target': str(target)}\n data = self.connect('post','nodes/%s/lxc/%s/migrate' % (node,vmid), post_data)\n return data", "def migrate(env, dry_run=False):\n registry = env['registry']\n settings = registry.settings\n readonly_backends = ('storage', 'permission')\n readonly_mode = asbool(settings.get('readonly', False))\n\n for backend in ('cache', 'storage', 'permission'):\n if hasattr(registry, backend):\n if readonly_mode and backend in readonly_backends:\n message = ('Cannot migrate the %s backend while '\n 'in readonly mode.' % backend)\n logger.error(message)\n else:\n getattr(registry, backend).initialize_schema(dry_run=dry_run)", "def mongodb_drop():\n # Load environment variables\n dotenv_path = find_dotenv()\n load_dotenv(dotenv_path)\n # Connect to the db\n # DB will be created if it doesn't already exist\n client = pymongo.MongoClient(os.environ.get(\"DATABASE_URL\"), 27017)\n client = client.drop_database(\"tweetbase\")", "def save_list_mongo(listz):\t\n\tconnection = pymongo.Connection('localhost', 27017)\n\tdb = connection.database\n\tcollection = db.warez_collection", "def testMigrateAnInstanceInAnInstanceGroup(self):\n ### create test resources\n instance_name_1 = 'end-to-end-test-instance-1'\n instance_selfLink_1 = \\\n self.test_resource_creator.create_instance_using_template(\n instance_name_1,\n self.test_resource_creator.legacy_instance_template_selfLink)[\n 'targetLink']\n original_config = self.google_api_interface.get_instance_configs(\n instance_name_1)\n unmanaged_instance_group_name = 'end-to-end-test-unmanaged-instance-group-1'\n self.test_resource_creator.create_unmanaged_instance_group(\n unmanaged_instance_group_name,\n [instance_name_1])\n ### start migration\n selfLink_executor = SelfLinkExecutor(self.compute,\n instance_selfLink_1,\n self.test_resource_creator.network_name,\n self.test_resource_creator.subnetwork_name,\n False)\n\n with self.assertRaises(AmbiguousTargetResource):\n migration_handler = selfLink_executor.build_migration_handler()\n migration_handler.network_migration()\n ### check migration result\n # the migration never starts, so the config didn't change\n new_config = self.google_api_interface.get_instance_configs(\n instance_name_1)\n self.assertEqual(new_config, original_config)\n print('Pass the current test')", "def nocleanup():\n _lbs = kivaloo.servers.Server_lbs()\n _kvlds = kivaloo.servers.Server_kvlds()", "def __init__(self, db_name='leaderboard'):\n key = os.getenv('ATLAS_KEY')\n self.valid = key is not None\n self.client = None\n self.database = None\n if 
self.valid:\n try:\n self.client = pymongo.MongoClient(key % db_name)\n self.database = self.client[db_name]\n except pymongo.errors.ConfigurationError:\n self.valid = False", "def default_zone(self, region):\n if region == 'us-east-1':\n return region + 'b'\n else:\n return region + 'a'" ]
[ "0.5335884", "0.50473094", "0.5008553", "0.49998647", "0.48307848", "0.47682485", "0.47216007", "0.4666874", "0.46228546", "0.45956224", "0.4590136", "0.45401716", "0.45268676", "0.4509593", "0.45067737", "0.4491239", "0.44909394", "0.4489098", "0.44764355", "0.44562212", "0.44552153", "0.4452588", "0.44209155", "0.4413766", "0.44047692", "0.44022316", "0.43876037", "0.43639293", "0.43613485", "0.43509868", "0.4350281", "0.43180835", "0.43142134", "0.43064004", "0.43009833", "0.42890292", "0.4267697", "0.42645198", "0.4257341", "0.42567968", "0.42564234", "0.42538708", "0.42514712", "0.42497528", "0.4248357", "0.4247581", "0.42430687", "0.42307425", "0.4230253", "0.42295033", "0.42262346", "0.42239565", "0.42197517", "0.42053407", "0.42049512", "0.4203045", "0.42021617", "0.42013466", "0.4198466", "0.41966215", "0.41961485", "0.41927412", "0.41557997", "0.41503388", "0.4149511", "0.41447994", "0.41431043", "0.41431043", "0.41413295", "0.4138478", "0.41375104", "0.4135161", "0.41278815", "0.4126963", "0.41255993", "0.4123824", "0.4123794", "0.41236663", "0.41236663", "0.4123581", "0.41211066", "0.41202372", "0.41177627", "0.4099029", "0.40968773", "0.4092736", "0.40895063", "0.4089035", "0.40839347", "0.40738735", "0.40738624", "0.40725204", "0.40713844", "0.40702423", "0.40662047", "0.40658936", "0.40632343", "0.40607414", "0.4058552", "0.40555742" ]
0.52417934
1
This operation is available only for replica set instances that run MongoDB 4.2 or earlier and for sharded cluster instances. Before you call this operation, make sure that the following prerequisites are met: If a public endpoint has been applied for the ApsaraDB for MongoDB instance, call the [ReleasePublicNetworkAddress](~~67604~~) operation to release the public endpoint first. Transparent data encryption (TDE) is disabled for the instance. The source zone and the destination zone belong to the same region. If the instance resides in a virtual private cloud (VPC), a vSwitch has been created in the destination zone. For more information about how to create a vSwitch, see [Work with vSwitches](~~65387~~).
async def migrate_available_zone_async( self, request: dds_20151201_models.MigrateAvailableZoneRequest, ) -> dds_20151201_models.MigrateAvailableZoneResponse: runtime = util_models.RuntimeOptions() return await self.migrate_available_zone_with_options_async(request, runtime)
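A minimal usage sketch for the operation above, assuming the standard alibabacloud_dds20151201 Client and alibabacloud_tea_openapi Config classes; the credentials, endpoint, instance ID, destination zone ID, vSwitch ID, and effective_time value below are placeholders or assumptions, not values taken from this record.

from alibabacloud_dds20151201.client import Client
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_tea_openapi import models as open_api_models

# Placeholder credentials and endpoint -- replace with real values.
config = open_api_models.Config(
    access_key_id='<access-key-id>',
    access_key_secret='<access-key-secret>',
    endpoint='mongodb.aliyuncs.com',
)
client = Client(config)

# All IDs are placeholders; Vswitch is required only when the instance resides in a VPC.
request = dds_20151201_models.MigrateAvailableZoneRequest(
    dbinstance_id='dds-bp1xxxxxxxxxxxxx',
    zone_id='cn-hangzhou-h',
    vswitch='vsw-bp1xxxxxxxxxxxxx',
    effective_time='Immediately',  # assumed value; confirm against the API reference
)

response = client.migrate_available_zone(request)
print(response.body)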
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def migrate_available_zone_with_options(\n self,\n request: dds_20151201_models.MigrateAvailableZoneRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.MigrateAvailableZoneResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.effective_time):\n query['EffectiveTime'] = request.effective_time\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.vswitch):\n query['Vswitch'] = request.vswitch\n if not UtilClient.is_unset(request.zone_id):\n query['ZoneId'] = request.zone_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='MigrateAvailableZone',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.MigrateAvailableZoneResponse(),\n self.call_api(params, req, runtime)\n )", "def migrate_available_zone(\n self,\n request: dds_20151201_models.MigrateAvailableZoneRequest,\n ) -> dds_20151201_models.MigrateAvailableZoneResponse:\n runtime = util_models.RuntimeOptions()\n return self.migrate_available_zone_with_options(request, runtime)", "def test_migrate_volume_driver_cross_az(self):\n # Mock driver and rpc functions\n self.mock_object(self.volume.driver, 'migrate_volume',\n lambda x, y, z, new_type_id=None: (\n True, {'user_id': fake.USER_ID}))\n dst_az = 'AZ2'\n db.service_update(self.context, self._service.id,\n {'availability_zone': dst_az})\n\n volume = tests_utils.create_volume(self.context, size=0,\n host=CONF.host,\n migration_status='migrating')\n host_obj = {'host': 'newhost', 'capabilities': {}}\n self.volume.migrate_volume(self.context, volume, host_obj, False)\n\n # check volume properties\n volume.refresh()\n self.assertEqual('newhost', volume.host)\n self.assertEqual('success', volume.migration_status)\n self.assertEqual(dst_az, volume.availability_zone)", "def install_mongo ( vpc_conn, ec2_conn, cloudwatch_conn, vpc, base_name, aws_account_type, base_topicarn, params ):\n app_type = 'MONGO'\n mongo_keypair = get_keypair_name( aws_account_type, vpc.region.name, app_type )\n print mongo_keypair\n mongo_sec_grp_name = get_secgrp_name( base_name, app_type )\n \n print \"mongo_sec_grp_name\" + mongo_sec_grp_name\n \n mongo_sec_grp = find_secgrp(ec2_conn, mongo_sec_grp_name)\n print mongo_sec_grp\n \n if not mongo_sec_grp :\n mongo_sec_grp = create_secgrp( ec2_conn,\n vpc,\n mongo_sec_grp_name,\n 'Controls access to the ' + app_type )\n print mongo_sec_grp \n \n remove_all_rules( ec2_conn, [ mongo_sec_grp ],deep = True,base_name = base_name)\n grant_ssh_access( ec2_conn, [ mongo_sec_grp ], find_group( ec2_conn, base_name, 'NAT' ) )\n grant_grp_self_access ( ec2_conn, mongo_sec_grp, 0, 65535, protocol = 'tcp' )\n \n mongo_ami_name = base_name + '-' + app_type\n \n mongo_ami = get_ami_by_name( ec2_conn, mongo_ami_name )\n if not mongo_ami :\n mongo_ami = get_ami_by_name( ec2_conn, 
default_mongo_ami )\n\n print \"Creating mongoDB Instances\"\n \n mongo_subnets = get_vpc_subnets( vpc_conn, vpc, 'STATIC' )\n mongo_sec_grps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ mongo_sec_grp_name ] } )\n \n primary_ip = get_primary_node(mongo_subnets)\n mongo_instances = []\n mongo_config = get_mongo_config(mongo_subnets)\n \n user_data = None\n primary_instance_id = None\n primary_subnet = None\n for subnet in mongo_subnets :\n cidr_block = subnet.cidr_block\n \n if cidr_block == primary_ip :\n primary_subnet = subnet\n \n # First launch the secondary instances\n if cidr_block != primary_ip :\n instance_private_ip = get_static_ip(subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo )\n\n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )\n\n user_data = base64.b64encode(mongo_config)\n \n print \"Launching primary mongo instance\"\n print \"primary_subnet \" +primary_subnet.cidr_block\n instance_private_ip = get_static_ip(primary_subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = primary_subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo)\n \n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )", "async def migrate_available_zone_with_options_async(\n self,\n request: dds_20151201_models.MigrateAvailableZoneRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.MigrateAvailableZoneResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.effective_time):\n query['EffectiveTime'] = request.effective_time\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.vswitch):\n query['Vswitch'] = request.vswitch\n if not UtilClient.is_unset(request.zone_id):\n query['ZoneId'] = request.zone_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='MigrateAvailableZone',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n 
dds_20151201_models.MigrateAvailableZoneResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def create_mongodb(config):\n\n \n mongo_url = \"mongodb://\"\n mongo_url += \",\".join(map(lambda srv: srv['host'] + \":\" + str(srv['port']), config['data']['mongoServers']))\n \n if 'replica' in config['data']:\n mongo_url += \"/?replicaSet={0}\".format(config['data']['replica'])\n\n client = MongoClient(mongo_url)\n\n return client", "def change_zone_ip(config, section, new_ip):\n\n a_name = config.get(section, \"a_name\")\n apikey = config.get(section, \"apikey\")\n ttl = int(config.get(section, \"ttl\"))\n zone_id = get_zone_id(config, section)\n\n zone_record = {'name': a_name, 'value': new_ip, 'ttl': ttl, 'type': 'A'}\n\n new_zone_ver = api.domain.zone.version.new(apikey, zone_id)\n\n # clear old A record (defaults to previous verison's\n api.domain.zone.record.delete(apikey, zone_id, new_zone_ver,\n {'type': 'A', 'name': a_name})\n\n # Add in new A record\n api.domain.zone.record.add(apikey, zone_id, new_zone_ver, zone_record)\n\n # Set new zone version as the active zone\n api.domain.zone.version.set(apikey, zone_id, new_zone_ver)", "def configure_cluster(ctx, zone, db_instance_name):\n ctx.run(init_pg_servers_play_run(zone, db_instance_name), pty=True, echo=True)", "def test_mongodb_destination(sdc_builder, sdc_executor, mongodb):\n pipeline_builder = sdc_builder.get_pipeline_builder()\n pipeline_builder.add_error_stage('Discard')\n\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='TEXT', raw_data='\\n'.join(DATA))\n\n expression_evaluator = pipeline_builder.add_stage('Expression Evaluator')\n # MongoDB destination uses the CRUD operation in the sdc.operation.type record header attribute when writing\n # to MongoDB. 
Value 4 specified below is for UPSERT.\n expression_evaluator.header_attribute_expressions = [{'attributeToSet': 'sdc.operation.type',\n 'headerAttributeExpression': '1'}]\n\n mongodb_dest = pipeline_builder.add_stage('MongoDB', type='destination')\n mongodb_dest.set_attributes(database=get_random_string(ascii_letters, 5),\n collection=get_random_string(ascii_letters, 10))\n # From 3.6.0, unique key field is a list, otherwise single string for older version.\n mongodb_dest.unique_key_field = ['/text'] if Version(sdc_builder.version) >= Version('3.6.0') else '/text'\n\n record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')\n trash = pipeline_builder.add_stage('Trash')\n dev_raw_data_source >> record_deduplicator >> expression_evaluator >> mongodb_dest\n record_deduplicator >> trash\n pipeline = pipeline_builder.build().configure_for_environment(mongodb)\n\n try:\n # Data is generated in dev_raw_data_source and sent to MongoDB using pipeline.\n sdc_executor.add_pipeline(pipeline)\n sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(DATA))\n sdc_executor.stop_pipeline(pipeline)\n\n # Verify data is received correctly using PyMongo.\n # Similar to writing, while reading data, we specify MongoDB database and the collection inside it.\n logger.info('Verifying docs received with PyMongo...')\n assert [item['text'] for item in mongodb.engine[mongodb_dest.database][mongodb_dest.collection].find()] == DATA\n\n finally:\n logger.info('Dropping %s database...', mongodb_dest.database)\n mongodb.engine.drop_database(mongodb_dest.database)", "def test_mongodb_origin_simple(sdc_builder, sdc_executor, mongodb):\n pipeline_builder = sdc_builder.get_pipeline_builder()\n pipeline_builder.add_error_stage('Discard')\n\n mongodb_origin = pipeline_builder.add_stage('MongoDB', type='origin')\n mongodb_origin.set_attributes(capped_collection=False,\n database=get_random_string(ascii_letters, 5),\n collection=get_random_string(ascii_letters, 10))\n\n trash = pipeline_builder.add_stage('Trash')\n mongodb_origin >> trash\n pipeline = pipeline_builder.build().configure_for_environment(mongodb)\n\n try:\n # MongoDB and PyMongo add '_id' to the dictionary entries e.g. docs_in_database\n # when used for inserting in collection. Hence the deep copy.\n docs_in_database = copy.deepcopy(ORIG_DOCS)\n\n # Create documents in MongoDB using PyMongo.\n # First a database is created. 
Then a collection is created inside that database.\n # Then documents are created in that collection.\n logger.info('Adding documents into %s collection using PyMongo...', mongodb_origin.collection)\n mongodb_database = mongodb.engine[mongodb_origin.database]\n mongodb_collection = mongodb_database[mongodb_origin.collection]\n insert_list = [mongodb_collection.insert_one(doc) for doc in docs_in_database]\n assert len(insert_list) == len(docs_in_database)\n\n # Start pipeline and verify the documents using snaphot.\n sdc_executor.add_pipeline(pipeline)\n snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot\n sdc_executor.stop_pipeline(pipeline)\n rows_from_snapshot = [{record.value['value']['name']['sqpath'].lstrip('/'):\n record.value['value']['name']['value']}\n for record in snapshot[mongodb_origin].output]\n\n assert rows_from_snapshot == ORIG_DOCS\n\n finally:\n logger.info('Dropping %s database...', mongodb_origin.database)\n mongodb.engine.drop_database(mongodb_origin.database)", "def switch_availability_zone():\n global current_az\n if current_az == 0:\n current_az = 1\n else:\n current_az = 0", "def migrate_contract(network):\n print(network)", "def bind(self,cluster_name,ip_address='',bind_details={},project_id=''):\n project_id = project_id if project_id != '' else self.__project_id\n if ip_address == '':\n headers = { 'User-Agent': 'curl/7.61.0'} # spoof for simple response\n ip = requests.get('http://ifconfig.co', headers)\n ip_address = ip.text.rstrip()\n logger.info(f'bind: looked up ip address: {ip_address}')\n #key = self.create_programatic_apikey(description=description,project_id=project_id)\n db_user = { 'username' : 'foo'\n ,'password' : 'changeme'\n ,'databaseName' : 'admin'\n ,'roles' : [ {'databaseName' : 'admin', 'roleName' : 'dbAdminAnyDatabase'} ] \n }\n user = self.create_database_user(db_user,project_id=project_id) \n cluster = self.get_cluster(cluster_name)\n cs = cluster['mongoURIWithOptions'].split('/',1)\n #conn_str = f'{cs[0]//{key['publicKey']}:{key['privateKey']}@{cs[1]}'\n return conn_str", "def test_mongodb_origin_simple_with_BSONBinary(sdc_builder, sdc_executor, mongodb):\n\n ORIG_BINARY_DOCS = [\n {'data': binary.Binary(b'Binary Data Flute')},\n {'data': binary.Binary(b'Binary Data Oboe')},\n {'data': binary.Binary(b'Binary Data Violin')}\n ]\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n pipeline_builder.add_error_stage('Discard')\n\n mongodb_origin = pipeline_builder.add_stage('MongoDB', type='origin')\n mongodb_origin.set_attributes(capped_collection=False,\n database=get_random_string(ascii_letters, 5),\n collection=get_random_string(ascii_letters, 10))\n\n trash = pipeline_builder.add_stage('Trash')\n mongodb_origin >> trash\n pipeline = pipeline_builder.build().configure_for_environment(mongodb)\n\n try:\n # MongoDB and PyMongo add '_id' to the dictionary entries e.g. docs_in_database\n # when used for inserting in collection. Hence the deep copy.\n docs_in_database = copy.deepcopy(ORIG_BINARY_DOCS)\n\n # Create documents in MongoDB using PyMongo.\n # First a database is created. 
Then a collection is created inside that database.\n # Then documents are created in that collection.\n logger.info('Adding documents into %s collection using PyMongo...', mongodb_origin.collection)\n mongodb_database = mongodb.engine[mongodb_origin.database]\n mongodb_collection = mongodb_database[mongodb_origin.collection]\n insert_list = [mongodb_collection.insert_one(doc) for doc in docs_in_database]\n assert len(insert_list) == len(docs_in_database)\n\n # Start pipeline and verify the documents using snaphot.\n sdc_executor.add_pipeline(pipeline)\n snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot\n sdc_executor.stop_pipeline(pipeline)\n rows_from_snapshot = [{'data': str(record.value2['data'])} for record in snapshot[mongodb_origin].output]\n\n assert rows_from_snapshot == [{'data': str(record.get('data'))} for record in ORIG_BINARY_DOCS]\n\n finally:\n logger.info('Dropping %s database...', mongodb_origin.database)\n mongodb.engine.drop_database(mongodb_origin.database)", "def test_12_migrate_vm_live_with_snapshots_on_remote(self):\n global vm2\n # Get ROOT Volume\n vol_for_snap = list_volumes(\n self.apiclient,\n virtualmachineid=vm2.id,\n listall=True)\n for vol in vol_for_snap:\n snapshot = Snapshot.create(\n self.apiclient,\n volume_id=vol.id\n )\n snapshot.validateState(\n self.apiclient,\n snapshotstate=\"backedup\",\n )\n # Migrate all volumes and VMs\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm2 = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_2, destinationHost)\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)", "def migrate_database():\n log('Migrating the keystone database.', level=INFO)\n service_stop(keystone_service())\n # NOTE(jamespage) > icehouse creates a log file as root so use\n # sudo to execute as keystone otherwise keystone won't start\n # afterwards.\n cmd = ['sudo', '-u', 'keystone', 'keystone-manage', 'db_sync']\n subprocess.check_output(cmd)\n service_start(keystone_service())\n time.sleep(10)\n peer_store('db-initialised', 'True')", "def connect_and_update(_id, padding, host, port, dbname, collname,\n updates_per_process, process_number, replica_set):\n client = MongoClient(host=[get_hostport_string(host=host, port=port)],\n replicaset=replica_set)\n db = client[dbname]\n collection = db[collname]\n try: # Unless using multiple docs, most of these will fail\n collection.insert_one({\"_id\": _id, \"padding\": padding})\n except:\n pass\n\n for j in xrange(updates_per_process):\n update_document(_id, collection, padding, process_number)\n \n client.close()", "def migrate_to_other_zone(\n self,\n request: dds_20151201_models.MigrateToOtherZoneRequest,\n ) -> dds_20151201_models.MigrateToOtherZoneResponse:\n runtime = util_models.RuntimeOptions()\n return self.migrate_to_other_zone_with_options(request, runtime)", "def test_transform_and_load_vpcs(neo4j_session):\n vpc_res = tests.data.gcp.compute.VPC_RESPONSE\n vpc_list = cartography.intel.gcp.compute.transform_gcp_vpcs(vpc_res)\n cartography.intel.gcp.compute.load_gcp_vpcs(neo4j_session, vpc_list, TEST_UPDATE_TAG)\n\n query = \"\"\"\n MATCH(vpc:GCPVpc{id:$VpcId})\n RETURN vpc.id, vpc.partial_uri, vpc.auto_create_subnetworks\n \"\"\"\n expected_vpc_id = 
'projects/project-abc/global/networks/default'\n nodes = neo4j_session.run(\n query,\n VpcId=expected_vpc_id,\n )\n actual_nodes = {(n['vpc.id'], n['vpc.partial_uri'], n['vpc.auto_create_subnetworks']) for n in nodes}\n expected_nodes = {\n (expected_vpc_id, expected_vpc_id, True),\n }\n assert actual_nodes == expected_nodes", "def to_network_v4(zone: Zone) -> ipaddress.IPv4Network:\n\n labels = zone.name.split(\".\")[:-3]\n netmask: int = 8 * len(labels)\n offset = 4 - len(labels)\n\n pattern = r\"^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)([/-](2[5-9]|3[0-1]))?$\"\n last_label_parsed = re.search(pattern, labels[0])\n if not last_label_parsed:\n raise ValueError(\"Faild to parse the zone name\")\n\n if last_label_parsed[2]:\n # non-octet boundary delegation detected\n # remove netmask and save it to the result\n last_octect = last_label_parsed[1]\n labels[0] = last_octect\n netmask = int(last_label_parsed[2][1:])\n\n labels = [\"0\"] * offset + labels\n prefix_str = \".\".join(reversed(labels))\n prefix_str += f\"/{netmask}\"\n\n return ipaddress.IPv4Network(prefix_str, strict=True)", "def _resolve_shard(client):\n status = client.admin.command('serverStatus')\n if status['process'] == 'mongos':\n raise RuntimeError(\"Destination cannot be mongos\")\n return client", "def migrateVirtualMachine(self,node,vmid,target,online=False,force=False):\n post_data = {'target': str(target)}\n if online:\n post_data['online'] = '1'\n if force:\n post_data['force'] = '1'\n data = self.connect('post',\"nodes/%s/qemu/%s/migrate\" % (node,vmid), post_data)\n return data", "def setup_source_db(self):\n conn = MongoReplicaSetClient(host=self._source_host,\n replicaSet=self._replica_set,\n read_preference=ReadPreference.PRIMARY)\n conn['admin'].authenticate(self._user, self._password)\n return conn", "def change_address(\n vm_hostname, new_address,\n offline=False, migrate=False, allow_reserved_hv=False,\n offline_transport='drbd',\n):\n\n if not offline:\n raise IGVMError('IP address change can be only performed offline')\n\n with _get_vm(vm_hostname) as vm:\n if vm.dataset_obj['datacenter_type'] != 'kvm.dct':\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n new_address = ip_address(new_address)\n\n if vm.dataset_obj['intern_ip'] == new_address:\n raise ConfigError('New IP address is the same as the old one!')\n\n if not vm.hypervisor.get_vlan_network(new_address) and not migrate:\n err = 'Current hypervisor does not support new subnet!'\n raise ConfigError(err)\n\n new_network = Query(\n {\n 'servertype': 'route_network',\n 'state': 'online',\n 'network_type': 'internal',\n 'intern_ip': Contains(new_address),\n }\n ).get()['hostname']\n\n vm_was_running = vm.is_running()\n\n with Transaction() as transaction:\n if vm_was_running:\n vm.shutdown(\n transaction=transaction,\n check_vm_up_on_transaction=False,\n )\n vm.change_address(\n new_address, new_network, transaction=transaction,\n )\n\n if migrate:\n vm_migrate(\n vm_object=vm,\n run_puppet=True,\n offline=True,\n no_shutdown=True,\n allow_reserved_hv=allow_reserved_hv,\n offline_transport=offline_transport,\n )\n else:\n vm.hypervisor.mount_vm_storage(vm, transaction=transaction)\n vm.run_puppet()\n vm.hypervisor.redefine_vm(vm)\n vm.hypervisor.umount_vm_storage(vm)\n\n if vm_was_running:\n vm.start()", "def migrate_replica(replica, location, noRemove=False, mirror=False):\n\n from tardis.tardis_portal.models import Replica, Location\n\n with transaction.commit_on_success():\n 
replica = Replica.objects.select_for_update().get(pk=replica.pk)\n source = Location.get_location(replica.location.name)\n\n if not replica.verified or location.provider.trust_length:\n raise MigrationError('Only verified datafiles can be migrated' \\\n ' to this destination')\n\n filename = replica.get_absolute_filepath()\n try:\n newreplica = Replica.objects.get(datafile=replica.datafile,\n location=location)\n created_replica = False\n # We've most likely mirrored this file previously. But if\n # we are about to delete the source Replica, we need to check\n # that the target Replica still verifies.\n if not mirror and not check_file_transferred(newreplica, location):\n raise MigrationError('Previously mirrored / migrated Replica' \\\n ' no longer verifies locally!')\n except Replica.DoesNotExist:\n newreplica = Replica()\n newreplica.location = location\n newreplica.datafile = replica.datafile\n newreplica.protocol = ''\n newreplica.stay_remote = location != Location.get_default_location()\n newreplica.verified = False\n url = location.provider.generate_url(newreplica)\n\n if newreplica.url == url:\n # We should get here ...\n raise MigrationError('Cannot migrate a replica to its' \\\n ' current location')\n newreplica.url = url\n location.provider.put_file(replica, newreplica)\n verified = False\n try:\n verified = check_file_transferred(newreplica, location)\n except:\n # FIXME - should we always do this?\n location.provider.remove_file(newreplica)\n raise\n\n newreplica.verified = verified\n newreplica.save()\n logger.info('Transferred file %s for replica %s' %\n (filename, replica.id))\n created_replica = True\n\n if mirror:\n return created_replica\n\n # FIXME - do this more reliably ...\n replica.delete()\n if not noRemove:\n source.provider.remove_file(replica)\n logger.info('Removed local file %s for replica %s' %\n (filename, replica.id))\n return True", "def connect_to_mongo(self, host='127.0.0.1', port=27017, instance='local'):\n if instance == 'prod':\n logging.info('connecting to mongo Atlas')\n self.db_client = MongoClient('mongodb+srv://{}:{}@{}/'\n '{}?retryWrites=true&w=majority'.format(self.config['db']['username'],\n self.config['db']['password'],\n self.config['db']['atlas'],\n self.db_name))\n else:\n logging.info('connecting to local Atlas')\n self.db_client = MongoClient(host, port)", "def move_ips_to_interface(apps, schema_editor):\n UserAS = apps.get_model('scionlab', 'UserAS')\n\n for useras in UserAS.objects.iterator():\n # UserASes have a unique host and before the multi-AP feature had a unique interface\n host = useras.hosts.get()\n iface = useras.interfaces.get()\n if not iface.public_ip:\n iface.public_ip = host.public_ip\n iface.bind_ip = host.bind_ip\n iface.save()\n host.public_ip = None\n host.bind_ip = None\n host.save()", "def upgrade_to_2():\n\n def update_file_origins(cont_list, cont_name):\n for container in cont_list:\n updated_files = []\n for file in container.get('files', []):\n origin = file.get('origin')\n if origin is not None:\n if origin.get('name', None) is None:\n file['origin']['name'] = origin['id']\n if origin.get('method', None) is None:\n file['origin']['method'] = ''\n updated_files.append(file)\n\n query = {'_id': container['_id']}\n update = {'$set': {'files': updated_files}}\n result = config.db[cont_name].update_one(query, update)\n\n query = {'$and':[{'files.origin.name': { '$exists': False}}, {'files.origin.id': { '$exists': True}}]}\n\n update_file_origins(config.db.collections.find(query), 'collections')\n 
update_file_origins(config.db.projects.find(query), 'projects')\n update_file_origins(config.db.sessions.find(query), 'sessions')\n update_file_origins(config.db.acquisitions.find(query), 'acquisitions')", "def try_atlas():\n result = {}\n try:\n vcap_services = os.getenv('VCAP_SERVICES')\n services = json.loads(vcap_services)\n for service_name in services.keys():\n print(f'service_name={service_name}')\n if service_name == \"_\":\n continue\n credentials = load_from_vcap_services(service_name)\n result.update(credentials)\n except Exception as err:\n print( f'Error looking for VCAP_SERVICES {err}')\n result['error']=err\n return result\n\n mongo_results = {}\n try:\n db = MongoClient( result['connectionString'] )\n mongo_results[\"MongoClient\"]= f'{db}'\n mongo_results[\"server_info\"]=db.server_info()\n except Exception as err:\n print( f'Error trying connection to Atlas: {err}')\n result['atlas-error']=err\n finally:\n result['mongo']=mongo_results\n\n return result", "def __init__(self, dst_mongodb_uri, dst_database, dst_collection, dry_run):\n self.client = pymongo.MongoClient(dst_mongodb_uri)\n self.dst_mongodb_uri = dst_mongodb_uri\n self.lookup_col = self.client[dst_database][dst_collection]\n self.dry_run = dry_run", "def test_transform_and_load_gcp_instances_and_nics(neo4j_session):\n instance_responses = [tests.data.gcp.compute.GCP_LIST_INSTANCES_RESPONSE]\n instance_list = cartography.intel.gcp.compute.transform_gcp_instances(instance_responses)\n cartography.intel.gcp.compute.load_gcp_instances(neo4j_session, instance_list, TEST_UPDATE_TAG)\n\n instance_id1 = 'projects/project-abc/zones/europe-west2-b/instances/instance-1-test'\n instance_id2 = 'projects/project-abc/zones/europe-west2-b/instances/instance-1'\n\n nic_query = \"\"\"\n MATCH(i:GCPInstance)-[r:NETWORK_INTERFACE]->(nic:GCPNetworkInterface)\n OPTIONAL MATCH (i)-[:TAGGED]->(t:GCPNetworkTag)\n RETURN i.id, i.zone_name, i.project_id, i.hostname, t.value, r.lastupdated, nic.nic_id, nic.private_ip\n \"\"\"\n objects = neo4j_session.run(nic_query)\n actual_nodes = {\n (\n o['i.id'],\n o['i.zone_name'],\n o['i.project_id'],\n o['nic.nic_id'],\n o['nic.private_ip'],\n o['t.value'],\n o['r.lastupdated'],\n ) for o in objects\n }\n\n expected_nodes = {\n (\n instance_id1,\n 'europe-west2-b',\n 'project-abc',\n 'projects/project-abc/zones/europe-west2-b/instances/instance-1-test/networkinterfaces/nic0',\n '10.0.0.3',\n None,\n TEST_UPDATE_TAG,\n ),\n (\n instance_id2,\n 'europe-west2-b',\n 'project-abc',\n 'projects/project-abc/zones/europe-west2-b/instances/instance-1/networkinterfaces/nic0',\n '10.0.0.2',\n 'test',\n TEST_UPDATE_TAG,\n ),\n }\n assert actual_nodes == expected_nodes", "def mongoRestore( self, db, infile ):\n\t\tsys_command = \"mongorestore --db \" + db + \" --host \" + self.host + \" --port \" + str( self.port ) + \" \" + infile \n\t\tos.system(sys_command)", "def test_migrate_volume_generic_cross_az(self, migrate_volume_completion,\n nova_api):\n original_create = objects.Volume.create\n dst_az = 'AZ2'\n db.service_update(self.context, self._service.id,\n {'availability_zone': dst_az})\n\n def my_create(self, *args, **kwargs):\n self.status = 'available'\n original_create(self, *args, **kwargs)\n\n volume = tests_utils.create_volume(self.context, size=1,\n host=CONF.host)\n\n host_obj = {'host': 'newhost', 'capabilities': {}}\n create_vol = self.patch('cinder.objects.Volume.create',\n side_effect=my_create, autospec=True)\n\n with mock.patch.object(self.volume, '_copy_volume_data') as copy_mock:\n 
self.volume._migrate_volume_generic(self.context, volume, host_obj,\n None)\n copy_mock.assert_called_with(self.context, volume, mock.ANY,\n remote='dest')\n migrate_volume_completion.assert_called_with(\n self.context, volume, mock.ANY, error=False)\n\n nova_api.return_value.update_server_volume.assert_not_called()\n\n self.assertEqual(dst_az,\n create_vol.call_args[0][0]['availability_zone'])", "def migrate_to_other_zone_with_options(\n self,\n request: dds_20151201_models.MigrateToOtherZoneRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.MigrateToOtherZoneResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.effective_time):\n query['EffectiveTime'] = request.effective_time\n if not UtilClient.is_unset(request.instance_id):\n query['InstanceId'] = request.instance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.v_switch_id):\n query['VSwitchId'] = request.v_switch_id\n if not UtilClient.is_unset(request.zone_id):\n query['ZoneId'] = request.zone_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='MigrateToOtherZone',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.MigrateToOtherZoneResponse(),\n self.call_api(params, req, runtime)\n )", "def test_vm_migration_across_hosts(self):\n\n # Create security group for the server\n group_create_body_update, _ = self._create_security_group()\n\n # Create server with security group\n name = data_utils.rand_name('server-with-security-group')\n server_id = self._create_server_with_sec_group(\n name, self.network['id'],\n group_create_body_update['security_group']['id'])\n self.assertTrue(self.verify_portgroup(self.network['id'], server_id))\n device_port = self.ports_client.list_ports(device_id=server_id)\n port_id = device_port['ports'][0]['id']\n floating_ip = self._associate_floating_ips(port_id=port_id)\n self.assertTrue(self.ping_ip_address(\n floating_ip['floatingip']['floating_ip_address'],\n should_succeed=False))\n\n # Update security group rule for the existing security group\n self.security_group_rules_client.create_security_group_rule(\n security_group_id=group_create_body_update['security_group']['id'],\n protocol='icmp',\n direction='ingress',\n ethertype=self.ethertype\n )\n self.assertTrue(self.ping_ip_address(\n floating_ip['floatingip']['floating_ip_address'],\n should_succeed=True))\n cluster = cfg.CONF.VCENTER.cluster_in_use\n content = self._create_connection()\n host_dic = self._get_host_name(server_id)\n vm_host = host_dic['host_name']\n vm_host_ip = vm_host.name\n cluster_hosts = self._get_hosts_for_cluster(content, cluster)\n if len(cluster_hosts.host) < 2:\n msg = \"Min two hosts needed in cluster for Vmotion\"\n raise testtools.TestCase.skipException(msg)\n for host in cluster_hosts.host:\n if host.name != vm_host_ip:\n dest_host = host\n # Live Migration\n task = self._migrate_vm(content, server_id, 
dest_host)\n self._wait_for_task(task, content)\n self.assertTrue(self.ping_ip_address(\n floating_ip['floatingip']['floating_ip_address'],\n should_succeed=True))", "def setup_target_db(self):\n conn = MongoClient(host=self._target_host)\n conn['admin'].authenticate(self._user, self._password)\n return conn", "def dvs_remote_ip_prefix(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n self.show_step(2)\n net_1 = os_conn.create_network(network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network are created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network.\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n self.show_step(3)\n self.show_step(4)\n self.show_step(5)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n access_point_1, access_point_ip_1 = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n security_groups=[security_group.name, sg1.name])\n\n access_point_2, access_point_ip_2 = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n security_groups=[security_group.name, sg2.name])\n\n self.show_step(6)\n _sg_rules = os_conn.neutron.list_security_group_rules()\n sg_rules = [sg_rule for sg_rule in _sg_rules['security_group_rules']\n if sg_rule['security_group_id'] in [sg1.id, sg2.id]]\n for rule in sg_rules:\n os_conn.neutron.delete_security_group_rule(rule['id'])\n\n self.show_step(7)\n for rule in [self.icmp, self.tcp]:\n rule[\"security_group_rule\"][\"security_group_id\"] = sg1.id\n rule[\"security_group_rule\"][\"remote_ip_prefix\"] = access_point_ip_1\n rule[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(rule)\n rule[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(rule)\n\n # Get private ip of access_point_2\n private_ip = os_conn.get_nova_instance_ip(access_point_2,\n net_name=net_1['name'])\n\n self.show_step(8)\n self.tcp[\"security_group_rule\"][\"security_group_id\"] = sg2.id\n self.tcp[\"security_group_rule\"][\"remote_ip_prefix\"] = private_ip\n os_conn.neutron.create_security_group_rule(self.tcp)\n self.tcp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n\n self.show_step(9)\n istances_sg1 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[sg1.name])\n\n istances_sg2 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[sg2.name])\n openstack.verify_instance_state(os_conn)\n\n # Get private ips of instances\n ips = {\n 
'SG1': [os_conn.assign_floating_ip(i).ip for i in istances_sg1],\n 'SG2': [os_conn.get_nova_instance_ip(i, net_name=net_1['name'])\n for i in istances_sg2]\n }\n\n self.show_step(10)\n ip_pair = dict.fromkeys(ips['SG1'])\n for key in ip_pair:\n ip_pair[key] = [access_point_ip_1]\n openstack.check_connection_through_host(access_point_ip_1,\n ip_pair,\n timeout=60 * 4)\n\n self.show_step(11)\n ip_pair = dict.fromkeys(ips['SG1'])\n for key in ip_pair:\n ip_pair[key] = [value for value in ips['SG1'] if key != value]\n openstack.check_connection_through_host(\n access_point_ip_1, ip_pair, result_of_command=1)\n\n self.show_step(12)\n self.show_step(13)\n ip_pair = dict.fromkeys(ips['SG2'])\n for key in ip_pair:\n ip_pair[key] = [value for value in ips['SG2'] if key != value]\n openstack.check_connection_through_host(\n access_point_ip_2, ip_pair, result_of_command=1)", "def get_mongo_config ( subnets ) :\n replications = \"\"\n primary_ip = get_primary_node(subnets)\n for subnet in subnets :\n if primary_ip != subnet.cidr_block :\n replication = get_static_ip(subnet.cidr_block, \"0/24\", mongo_ip_block)\n replication = replication.replace(\".\", \"-\")\n replications = replications + \"\\nrs.add(\\\"ip-\"+replication+\":27017\\\");\"\n \n \n return \"\"\"#!/bin/bash -ex\n exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1\n:>/etc/replication.js\necho 'rs.initiate();\"\"\"+replications+\"\"\"\n\n'>>/etc/replication.js\n\"\"\"", "def convert_container_to_replica(\n self,\n replica_name: str,\n active_container: docker.models.containers.Container,\n passive_container: docker.models.containers.Container) -> list[docker.models.images.Image]:\n new_replica_name = self.sanitize_replica_name(replica_name)\n replica_list = []\n container_list = [\n active_container, passive_container] if passive_container else [active_container]\n\n logger.info(\n f'Creating new replica image with name {new_replica_name}...')\n\n for container in container_list:\n try:\n self.client.images.remove(new_replica_name, force=True)\n except docker.errors.ImageNotFound:\n pass\n\n container_arch = container.name.split('_')[-1]\n\n # commit with arch tag\n replica = container.commit(\n repository=new_replica_name, tag=container_arch)\n replica_list.append(replica)\n\n logger.info(\n f'Replica image {replica.tags[0]} created. 
Cleaning up...')\n self.remove_container(container.name)\n\n for replica in replica_list:\n if replica.attrs.get('Architecture') == LOCAL_ARCHITECTURE:\n local_arch_replica = replica\n local_arch_replica.tag(\n repository=new_replica_name, tag='latest')\n\n # this is done due to how recomitting existing image is not reflected in 'replica_list' var\n actual_replica_list = self.client.images.list(new_replica_name)\n\n return actual_replica_list", "def upgrade_to_10():\n\n def switch_keys(doc, x, y):\n doc[y] = doc[x]\n doc.pop(x, None)\n\n\n jobs = config.db.jobs.find({'destination.container_type': {'$exists': True}})\n\n for job in jobs:\n switch_keys(job, 'algorithm_id', 'name')\n\n for key in job['inputs'].keys():\n inp = job['inputs'][key]\n\n switch_keys(inp, 'container_type', 'type')\n switch_keys(inp, 'container_id', 'id')\n switch_keys(inp, 'filename', 'name')\n\n\n dest = job['destination']\n switch_keys(dest, 'container_type', 'type')\n switch_keys(dest, 'container_id', 'id')\n\n config.db.jobs.update(\n {'_id': job['_id']},\n job\n )", "def mongo_connect(\n password,\n user='RCD2021',\n dbname='myFirstDatabase'\n):\n\n client = pymongo.MongoClient(f\"mongodb+srv://{user}:{password}\\\n@cluster0.z3tac.mongodb.net/{dbname}?retryWrites=true&w=majority\")\n db = client.test\n\n return db", "def restore_cluster(ctx, zone, db_instance, from_zone=None, from_db_instance=None, backup_folder=None, target_time=None):\n\n if from_zone == None:\n from_zone = zone\n if from_db_instance == None:\n from_db_instance = db_instance\n if backup_folder == None:\n get_env('AWS_SECRET_ACCESS_KEY', 'to list the backup buckets at AWS S3.')\n get_env('AWS_ACCESS_KEY_ID', 'to list the backup buckets at AWS S3.')\n get_env('AWS_REGION', 'to list the backup buckets at AWS S3.')\n print(\"Available values for --backup-folder :\\n\")\n res = ctx.run(\"aws s3 ls \" + backup_bucket_name(from_zone, from_db_instance), pty=True, hide=\"stdout\")\n for line in res.stdout.splitlines():\n print(re.search(\"PRE ([^ /]+)\", line).group(1))\n else:\n recover_from = \"{}/{}\".format(backup_bucket_name(from_zone, from_db_instance), backup_folder)\n print(\"\"\"\n Starting recovery\n \"\"\")\n more_vars = {'recover_from': recover_from}\n if target_time:\n more_vars['recovery_target_time'] = '\"{}\"'.format(target_time) # need quoting due to space char\n\n ctx.run(init_pg_servers_play_run(zone, db_instance, more_vars=more_vars), pty=True, echo=True)", "def _activate_new_zone(self):\n if ((not hasattr(self, '_current_zone')) or (not self._current_zone)) or ((not hasattr(self, '_new_zone_version_number')) or (not self._new_zone_version_number)):\n raise GandiApiException(\"Can't update record, no cloned zone available.\")\n success = self._api.domain.zone.version.set(self._api_key, self._current_zone['id'], \n self._new_zone_version_number)\n if not success:\n raise GandiApiException('Failed to activate new zone;')\n else:\n logging.info('New zone version activated.')", "def init_mongo_db():\n try:\n app.mongo.cx.server_info()\n app.mongo.db = app.mongo.cx[\"kamistudio\"]\n if \"kami_corpora\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_corpora\")\n app.mongo.db.kami_corpora.create_index(\"id\", unique=True)\n\n if \"kami_models\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_models\")\n app.mongo.db.kami_models.create_index(\"id\", unique=True)\n\n if \"kami_definitions\" not in app.mongo.db.collection_names():\n 
app.mongo.db.create_collection(\"kami_definitions\")\n app.mongo.db.kami_definitions.create_index(\"id\", unique=True)\n\n if \"kami_new_definitions\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_new_definitions\")\n\n except ServerSelectionTimeoutError as e:\n app.mongo.db = None", "def test_deploy_instance_with_new_network(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 252\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr)", "def test_relic():\n mongo_db = pymongo.MongoClient()\n init_db(mongo_db.roguesim_python)\n populate_db(mongo_db.roguesim_python)", "def test_07_migrate_vm_live_with_snapshots(self):\n global vm\n # Get ROOT Volume\n vol_for_snap = list_volumes(\n self.apiclient,\n virtualmachineid=vm.id,\n listall=True)\n for vol in vol_for_snap:\n snapshot = Snapshot.create(\n self.apiclient,\n volume_id=vol.id\n )\n snapshot.validateState(\n self.apiclient,\n snapshotstate=\"backedup\",\n )\n # Migrate all volumes and VMs\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm, self.host)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_1, destinationHost)\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm, self.host)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)", "def dns_sync(self, args):\r\n dns = DNSManager(self.client)\r\n vsi = VSManager(self.client)\r\n\r\n vs_id = resolve_id(vsi.resolve_ids, args.get('<identifier>'), 'VS')\r\n instance = vsi.get_instance(vs_id)\r\n zone_id = resolve_id(dns.resolve_ids, instance['domain'], name='zone')\r\n\r\n def sync_a_record():\r\n \"\"\" Sync A record \"\"\"\r\n records = dns.get_records(\r\n zone_id,\r\n host=instance['hostname'],\r\n )\r\n\r\n if not records:\r\n # don't have a record, lets add one to the base zone\r\n dns.create_record(\r\n zone['id'],\r\n instance['hostname'],\r\n 'a',\r\n instance['primaryIpAddress'],\r\n ttl=args['--ttl'])\r\n else:\r\n recs = [x for x in records if x['type'].lower() == 'a']\r\n if len(recs) != 1:\r\n raise CLIAbort(\"Aborting A record sync, found %d \"\r\n \"A record exists!\" % len(recs))\r\n rec = recs[0]\r\n rec['data'] = instance['primaryIpAddress']\r\n rec['ttl'] = args['--ttl']\r\n dns.edit_record(rec)\r\n\r\n def sync_ptr_record():\r\n \"\"\" Sync PTR record \"\"\"\r\n host_rec = instance['primaryIpAddress'].split('.')[-1]\r\n ptr_domains = self.client['Virtual_Guest'].\\\r\n getReverseDomainRecords(id=instance['id'])[0]\r\n edit_ptr = None\r\n for ptr in ptr_domains['resourceRecords']:\r\n if ptr['host'] == host_rec:\r\n ptr['ttl'] = args['--ttl']\r\n edit_ptr = ptr\r\n break\r\n\r\n if edit_ptr:\r\n edit_ptr['data'] = instance['fullyQualifiedDomainName']\r\n dns.edit_record(edit_ptr)\r\n else:\r\n dns.create_record(\r\n ptr_domains['id'],\r\n host_rec,\r\n 'ptr',\r\n instance['fullyQualifiedDomainName'],\r\n ttl=args['--ttl'])\r\n\r\n if not instance['primaryIpAddress']:\r\n raise CLIAbort('No primary IP address associated with this VS')\r\n\r\n zone = dns.get_zone(zone_id)\r\n\r\n go_for_it = args['--really'] or confirm(\r\n \"Attempt to update DNS records for %s\"\r\n % 
instance['fullyQualifiedDomainName'])\r\n\r\n if not go_for_it:\r\n raise CLIAbort(\"Aborting DNS sync\")\r\n\r\n both = False\r\n if not args['--ptr'] and not args['-a']:\r\n both = True\r\n\r\n if both or args['-a']:\r\n sync_a_record()\r\n\r\n if both or args['--ptr']:\r\n sync_ptr_record()", "def test_06_migrate_vm_live_attach_disk(self):\n \n global vm\n global data_disk_1\n data_disk_1 = self.helper.create_custom_disk(\n self.apiclient,\n {\"diskname\":\"StorPoolDisk\" },\n zoneid=self.zone.id,\n size = 5,\n miniops = 2000,\n maxiops = 5000,\n account=self.account.name,\n domainid=self.account.domainid,\n diskofferingid=self.disk_offerings.id,\n )\n\n self.debug(\"Created volume with ID: %s\" % data_disk_1.id)\n\n self.virtual_machine_live_migration_1.attach_volume(\n self.apiclient,\n data_disk_1\n )\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm, self.host)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_1, destinationHost)\n\n\n self.virtual_machine_live_migration_1.attach_volume(\n self.apiclient,\n self.volume\n )\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient,vm, self.host)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_1, destinationHost)\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient,vm, self.host)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)", "def load_mongo_configuration(ec2_conn,base_name,params ):\n print \"loading mongo configurings\"\n \n ## Allow security from build server to mongodb\n app_type = 'MONGO'\n \n mongo_sec_grp_name = get_secgrp_name( base_name, app_type )\n mongo_sec_grp = find_secgrp(ec2_conn, mongo_sec_grp_name)\n \n try :\n mongo_sec_grp.authorize( ip_protocol = \"tcp\",\n from_port = 27017,\n to_port = 27017,\n cidr_ip = build_server_cidr )\n except :\n print \"rule exists aready\" \n \n mongo_host = params.get( 'host' )\n mongo_port = params.get( 'port' )\n mongo_username = params.get( 'user-name' )\n mongo_password = params.get( 'password' )\n \n db_name = params.get( 'db_name' )\n collection_name = params.get( 'collection_name' )\n \n documents = params.get( 'documents' )\n \n uri = \"\"\n if len( mongo_username ) > 0 :\n uri = \"mongodb://\"+mongo_username+\":\"+mongo_password+\"@\"+mongo_host+\":\"+mongo_port+\"/\"\n else :\n uri = \"mongodb://\"+mongo_host+\":\"+mongo_port+\"/\"\n \n print \"Mongo Connect URL:\" +uri\n \n \n client = MongoClient(uri)\n \n\n db = client[db_name]\n collection = db[collection_name ]\n \n collection.remove()\n \n for document in documents :\n document = json.dumps(document)\n document = loads(document)\n collection.insert(document)\n document['createdTime'] = datetime.datetime.utcnow()\n collection.save(document)\n \n ## At the end revoke the build server rule \n try :\n mongo_sec_grp.revoke( ip_protocol = \"tcp\",\n from_port = 27017,\n to_port = 27017,\n cidr_ip = build_server_cidr)\n \n except :\n print \"exception removing rule\"\n \n print \"configured\"", "def igraph2mongo(graph,collection,mode='OUT',overwrite = False):\r\n for i in graph.vs:\r\n if not list(collection.find({'_id':i.index})):\r\n post = {\"_id\": i.index,\r\n \"neighbors_{}\".format(mode):list(set(graph.neighbors(i.index,mode=mode)))}\r\n post_id = 
collection.insert_one(post).inserted_id\r\n print( \"node \",post_id,\" added\")\r\n elif overwrite == True:\r\n post = {\"_id\": i.index,\r\n \"neighbors_{}\".format(mode):list(set(graph.neighbors(i.index,mode=mode)))}\r\n collection.replace_one({'_id':i.index},post)\r\n print(\"node \",i.index,\" replaced\")\r\n else:\r\n# print(\"THIS object has the _id\",i.index,list(collection.find({'_id':i.index})))\r\n pass\r\n if overwrite == True:\r\n print(collection, \"has been changed\")", "def swapdb(self, *args, **kwargs) -> NoReturn:\n raise RedisClusterException(\"SWAPDB is not supported in cluster mode\")", "def test_13_migrate_vm_live_resize_volume_on_remote(self):\n global vm2\n global data_disk_2\n\n vol = self.helper.resize_volume(apiclient = self.apiclient, volume = data_disk_1, shrinkOk = False, maxiops = 15000)\n\n # Migrate all volumes and VMs\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm2 = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_2, destinationHost)\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)", "def create_database():\n\n try:\n client = MongoClient(MONGO_URI,event_listeners=[CommandLogger()])\n db = client.get_database('UNSD')\n\n coll_ebal = db.get_collection('ebal')\n coll_unfcc = db.get_collection('unfcc')\n\n df_ebal = pd.read_csv(EBAL_FILE)\n df_unfcc = pd.read_csv(UNFCC_FILE)\n df_ebal = decoding_codes(df_ebal)\n\n coco_dict = {}\n for i in df_ebal[\"REF_AREA\"].unique():\n # if i not in coco_dict:\n coco_dict[i] = coco.convert(i, to='iso3')\n coco_dict[\"France-Monaco\"] = coco.convert(\"France\", to='iso3')\n coco_dict[\"Italy-San Marino\"] = coco.convert(\"Italy\", to='iso3')\n coco_dict[\"Switzerland-Liechtenstein\"] = coco.convert(\"Switzerland\", to='iso3')\n df_ebal[\"REF_AREA\"] = [coco_dict[i] for i in df_ebal[\"REF_AREA\"]]\n\n data_json_unfcc = json.loads(df_unfcc.to_json(orient='records'))\n data_json_ebal = json.loads(df_ebal.to_json(orient='records'))\n\n\n result = coll_ebal.insert_many(data_json_ebal)\n logger.info('Inserted a total of {} records in EBAL'.format(len(result.inserted_ids)))\n result = coll_unfcc.insert_many(data_json_unfcc)\n logger.info('Inserted a total of {} records in UNFCC'.format(len(result.inserted_ids)))\n\n except pymongo.errors.ConnectionFailure as e:\n logger.error('PyMongo error ConnectionFailure seen: ' + str(e))\n traceback.print_exc(file = sys.stdout)\n\n finally:\n client.close()", "def _regrid_ak_ext_ana_pcp_stage4(supplemental_precip, config_options, wrf_hydro_geo_meta, mpi_config):\n\n # If the expected file is missing, this means we are allowing missing files, simply\n # exit out of this routine as the regridded fields have already been set to NDV.\n if not os.path.exists(supplemental_precip.file_in1):\n return\n\n # Check to see if the regrid complete flag for this\n # output time step is true. 
This entails the necessary\n # inputs have already been regridded and we can move on.\n if supplemental_precip.regridComplete:\n if mpi_config.rank == 0:\n config_options.statusMsg = \"No StageIV regridding required for this timestep.\"\n err_handler.log_msg(config_options, mpi_config)\n return\n\n # Create a path for a temporary NetCDF files that will\n # be created through the wgrib2 process.\n stage4_tmp_nc = config_options.scratch_dir + \"/STAGEIV_TMP-{}.nc\".format(mkfilename())\n\n lat_var = \"latitude\"\n lon_var = \"longitude\"\n\n if supplemental_precip.fileType != NETCDF:\n # This file shouldn't exist.... but if it does (previously failed\n # execution of the program), remove it.....\n if mpi_config.rank == 0:\n if os.path.isfile(stage4_tmp_nc):\n config_options.statusMsg = \"Found old temporary file: \" + stage4_tmp_nc + \" - Removing.....\"\n err_handler.log_warning(config_options, mpi_config)\n try:\n os.remove(stage4_tmp_nc)\n except OSError:\n config_options.errMsg = f\"Unable to remove temporary file: {stage4_tmp_nc}\"\n err_handler.log_critical(config_options, mpi_config)\n err_handler.check_program_status(config_options, mpi_config)\n\n # Create a temporary NetCDF file from the GRIB2 file.\n cmd = f'$WGRIB2 -match \"APCP:surface:0-6 hour acc fcst\" {supplemental_precip.file_in2} -netcdf {stage4_tmp_nc}'\n if mpi_config.rank == 0:\n config_options.statusMsg = f\"WGRIB2 command: {cmd}\"\n err_handler.log_msg(config_options, mpi_config)\n id_tmp = ioMod.open_grib2(supplemental_precip.file_in2, stage4_tmp_nc, cmd,\n config_options, mpi_config, inputVar=None)\n err_handler.check_program_status(config_options, mpi_config)\n else:\n create_link(\"STAGEIV-PCP\", supplemental_precip.file_in2, stage4_tmp_nc, config_options, mpi_config)\n id_tmp = ioMod.open_netcdf_forcing(stage4_tmp_nc, config_options, mpi_config, False, lat_var, lon_var)\n\n # Check to see if we need to calculate regridding weights.\n calc_regrid_flag = check_supp_pcp_regrid_status(id_tmp, supplemental_precip, config_options,\n wrf_hydro_geo_meta, mpi_config)\n err_handler.check_program_status(config_options, mpi_config)\n\n if calc_regrid_flag:\n if mpi_config.rank == 0:\n config_options.statusMsg = \"Calculating STAGE IV regridding weights.\"\n err_handler.log_msg(config_options, mpi_config)\n calculate_supp_pcp_weights(supplemental_precip, id_tmp, stage4_tmp_nc, config_options, mpi_config, lat_var, lon_var)\n err_handler.check_program_status(config_options, mpi_config)\n\n # Regrid the input variables.\n var_tmp = None\n if mpi_config.rank == 0:\n if mpi_config.rank == 0:\n config_options.statusMsg = f\"Regridding STAGE IV '{supplemental_precip.netcdf_var_names[-1]}' Precipitation.\"\n err_handler.log_msg(config_options, mpi_config)\n try:\n var_tmp = id_tmp.variables[supplemental_precip.netcdf_var_names[-1]][0,:,:]\n except (ValueError, KeyError, AttributeError) as err:\n config_options.errMsg = \"Unable to extract precipitation from STAGE IV file: \" + \\\n supplemental_precip.file_in1 + \" (\" + str(err) + \")\"\n err_handler.log_critical(config_options, mpi_config)\n err_handler.check_program_status(config_options, mpi_config)\n\n var_sub_tmp = mpi_config.scatter_array(supplemental_precip, var_tmp, config_options)\n err_handler.check_program_status(config_options, mpi_config)\n\n try:\n supplemental_precip.esmf_field_in.data[:, :] = var_sub_tmp\n except (ValueError, KeyError, AttributeError) as err:\n config_options.errMsg = \"Unable to place STAGE IV precipitation into local ESMF field: \" + str(err)\n 
err_handler.log_critical(config_options, mpi_config)\n err_handler.check_program_status(config_options, mpi_config)\n\n try:\n supplemental_precip.esmf_field_out = supplemental_precip.regridObj(supplemental_precip.esmf_field_in,\n supplemental_precip.esmf_field_out)\n except ValueError as ve:\n config_options.errMsg = \"Unable to regrid STAGE IV supplemental precipitation: \" + str(ve)\n err_handler.log_critical(config_options, mpi_config)\n err_handler.check_program_status(config_options, mpi_config)\n\n # Set any pixel cells outside the input domain to the global missing value.\n try:\n supplemental_precip.esmf_field_out.data[np.where(supplemental_precip.regridded_mask == 0)] = \\\n config_options.globalNdv\n except (ValueError, ArithmeticError) as npe:\n config_options.errMsg = \"Unable to run mask search on STAGE IV supplemental precipitation: \" + str(npe)\n err_handler.log_critical(config_options, mpi_config)\n err_handler.check_program_status(config_options, mpi_config)\n\n supplemental_precip.regridded_precip2[:, :] = supplemental_precip.esmf_field_out.data\n err_handler.check_program_status(config_options, mpi_config)\n\n # Convert the 6-hourly precipitation total to a rate of mm/s\n try:\n ind_valid = np.where(supplemental_precip.regridded_precip2 != config_options.globalNdv)\n supplemental_precip.regridded_precip2[ind_valid] = supplemental_precip.regridded_precip2[ind_valid] / 3600.0\n del ind_valid\n except (ValueError, ArithmeticError, AttributeError, KeyError) as npe:\n config_options.errMsg = \"Unable to run NDV search on STAGE IV supplemental precipitation: \" + str(npe)\n err_handler.log_critical(config_options, mpi_config)\n err_handler.check_program_status(config_options, mpi_config)\n\n # If we are on the first timestep, set the previous regridded field to be\n # the latest as there are no states for time 0.\n if config_options.current_output_step == 1:\n supplemental_precip.regridded_precip1[:, :] = \\\n supplemental_precip.regridded_precip2[:, :]\n err_handler.check_program_status(config_options, mpi_config)\n\n # Close the temporary NetCDF file and remove it.\n if mpi_config.rank == 0:\n try:\n id_tmp.close()\n except OSError:\n config_options.errMsg = \"Unable to close NetCDF file: \" + stage4_tmp_nc\n err_handler.log_critical(config_options, mpi_config)\n try:\n os.remove(stage4_tmp_nc)\n except OSError:\n config_options.errMsg = \"Unable to remove NetCDF file: \" + stage4_tmp_nc\n err_handler.log_critical(config_options, mpi_config)\n err_handler.check_program_status(config_options, mpi_config)", "def test_migrate_on_compute_fail(self):\n server, source_host, target_host = self._create_server()\n\n # Wrap _prep_resize so we can concurrently delete the server.\n original_prep_resize = compute_manager.ComputeManager._prep_resize\n\n def wrap_prep_resize(*args, **kwargs):\n self._delete_server(server)\n return original_prep_resize(*args, **kwargs)\n\n self.stub_out('nova.compute.manager.ComputeManager._prep_resize',\n wrap_prep_resize)\n\n # Now start the cold migration which will fail in the dest compute.\n self.api.post_server_action(server['id'], {'migrate': None})\n # We cannot monitor the migration from the API since it is deleted\n # when the instance is deleted so just wait for the failed instance\n # action event after the allocation revert happens.\n self._wait_for_action_fail_completion(\n server, instance_actions.MIGRATE, 'compute_prep_resize')\n self._assert_no_allocations(server)", "def test_update_virtualization_realm(self):\n pass", "def 
tear_down_mongo(self):\r\n split_db = self.split_mongo.db\r\n # old_mongo doesn't give a db attr, but all of the dbs are the same\r\n split_db.drop_collection(self.old_mongo.collection)", "def __init__(self, source='10.0.2.32', is_local=False):\n super().__init__(source, is_local)\n self.client = MongoClient(source)", "def _mongodump(self, port, errors):\n ret = run(\"mongodump %s --forceTableScan --host %s:%d -o %s/%s\" % (\"--oplog\" if self.name != \"config\" and self.name != \"mongos\" and not self.can_restart else \"\", self.host, port, self.backup_path, self.name))\n if ret != 0:\n errors.put(Exception(\"Error dumping %s server\" % self.name))\n traceback.print_exc()\n return\n\n ret = run(\"cd %s && tar zcvf %s.tar.gz %s && rm -rf %s\" % (self.backup_path, self.name, self.name, self.name))\n if ret != 0:\n errors.put(Exception(\"Error zipping %s server backup\" % self.name))\n traceback.print_exc()", "def ping(context):\n\n ssh_config = aws_infrastructure.tasks.ssh.SSHConfig.load(SSH_CONFIG_PATH)\n documentdb_config = aws_infrastructure.tasks.library.documentdb.DocumentDBConfig.load(DOCUMENTDB_CONFIG_PATH)\n\n with aws_infrastructure.tasks.ssh.SSHClientContextManager(\n ssh_config=ssh_config,\n ) as ssh_client:\n with aws_infrastructure.tasks.ssh.SSHPortForwardContextManager(\n ssh_client=ssh_client,\n remote_host=documentdb_config.endpoint,\n remote_port=documentdb_config.port,\n ) as ssh_port_forward:\n client = MongoClient(\n host=[\n 'localhost'\n ],\n port=ssh_port_forward.local_port,\n connect=True,\n username=documentdb_config.admin_user,\n password=documentdb_config.admin_password,\n tls=True,\n tlsInsecure=True,\n )\n\n print(client.admin.command('ping'))", "def test_11_migrate_vm_live_attach_disk_on_remote(self):\n \n global vm2\n global data_disk_2\n data_disk_2 = self.helper.create_custom_disk(\n self.apiclient,\n {\"diskname\":\"StorPoolDisk\" },\n zoneid=self.zone.id,\n size = 5,\n miniops = 2000,\n maxiops = 5000,\n account=self.account.name,\n domainid=self.account.domainid,\n diskofferingid=self.disk_offerings.id,\n )\n\n self.debug(\"Created volume with ID: %s\" % data_disk_2.id)\n\n self.virtual_machine_live_migration_2.attach_volume(\n self.apiclient,\n data_disk_2\n )\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm2 = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_2, destinationHost)\n\n\n self.virtual_machine_live_migration_2.attach_volume(\n self.apiclient,\n self.volume_2\n )\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm2 = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_2, destinationHost)", "def test_port_update_after_vm_migration(self):\n # Create security group for the server\n group_create_body_update, _ = self._create_security_group()\n\n # Create server with security group\n name = data_utils.rand_name('server-with-security-group')\n server_id = self._create_server_with_sec_group(\n name, self.network['id'],\n group_create_body_update['security_group']['id'])\n self.assertTrue(self.verify_portgroup(self.network['id'], server_id))\n device_port = self.ports_client.list_ports(device_id=server_id)\n port_id = device_port['ports'][0]['id']\n floating_ip = self._associate_floating_ips(port_id=port_id)\n 
self.assertTrue(self.ping_ip_address(\n floating_ip['floatingip']['floating_ip_address'],\n should_succeed=False))\n cluster = cfg.CONF.VCENTER.cluster_in_use\n content = self._create_connection()\n host_dic = self._get_host_name(server_id)\n vm_host = host_dic['host_name']\n vm_host_ip = vm_host.name\n cluster_hosts = self._get_hosts_for_cluster(content, cluster)\n if len(cluster_hosts.host) < 2:\n msg = \"Min two hosts needed in cluster for Vmotion\"\n raise testtools.TestCase.skipException(msg)\n for host in cluster_hosts.host:\n if host.name != vm_host_ip:\n dest_host = host\n # Live Migration\n task = self._migrate_vm(content, server_id, dest_host)\n self._wait_for_task(task, content)\n # Update security group rule for the existing security group\n self.security_group_rules_client.create_security_group_rule(\n security_group_id=group_create_body_update['security_group']['id'],\n protocol='icmp',\n direction='ingress',\n ethertype=self.ethertype\n )\n self.assertTrue(self.ping_ip_address(\n floating_ip['floatingip']['floating_ip_address'],\n should_succeed=True))", "def _requires_inmigrate_from(self):\n existing = locate_live_service(self.consul, \"qemu-\" + self.name)\n\n if existing and existing[\"Address\"] != self.this_host:\n # Consul knows about a running VM. Lets try a migration.\n return existing[\"Address\"]\n\n if self.ceph.is_unlocked():\n # Consul doesn't know about a running VM and no volume is locked.\n # It doesn't make sense to live migrate this VM.\n return None\n\n if self.ceph.locked_by_me():\n # Consul doesn't know about a running VM and the volume is\n # locked by me, so it doesn't make sense to live migrate the VM.\n return None\n\n # The VM seems to be locked somewhere else, try to migrate it from\n # there.\n return self.ceph.locked_by()", "def _post_task_update_advertise_address():\n default_network_interface = None\n\n with open(KUBE_APISERVER_CONFIG) as f:\n lines = f.read()\n m = re.search(REGEXPR_ADVERTISE_ADDRESS, lines)\n if m:\n default_network_interface = m.group(1)\n LOG.debug(' default_network_interface = %s', default_network_interface)\n\n if advertise_address and default_network_interface \\\n and advertise_address != default_network_interface:\n cmd = [\"sed\", \"-i\", \"/oidc-issuer-url/! 
s/{}/{}/g\".format(default_network_interface, advertise_address),\n KUBE_APISERVER_CONFIG]\n _ = _exec_cmd(cmd)", "def nfvi_live_migrate_instance(instance_uuid, callback, to_host_name=None,\n block_storage_migration='auto', context=None):\n if context is None:\n cmd_id = _compute_plugin.invoke_plugin_expediate(\n 'live_migrate_instance', instance_uuid, to_host_name,\n block_storage_migration, context, callback=callback)\n else:\n cmd_id = _compute_plugin.invoke_plugin(\n 'live_migrate_instance', instance_uuid, to_host_name,\n block_storage_migration, context, callback=callback)\n return cmd_id", "def test_vm_migration(self):\n self.check_vm_host_after_migration(positive=False)", "def test_vm_migration(self):\n self.check_vm_host_after_migration(positive=False)", "def cleanUp(name):\n clovr = pymongo.Connection().clovr\n clovr.clusters.remove(dict(name=name))", "def prepare_replica_for_exchange(self, replica):\n pass", "def OSSupportsIPv4(self) -> bool:", "def test_deploy_instance_with_network_and_associate_public_ip(self):\n\n # skip test if suite couldn't start from an empty, clean list of allocated IPs (to avoid cascading failures)\n if self.suite_world['allocated_ips']:\n self.skipTest(\"There were pre-existing, not deallocated IPs\")\n\n # Allocate IP\n allocated_ip = self.__allocate_ip_test_helper__()\n\n # Create Router with an external network gateway\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n router_name = TEST_ROUTER_PREFIX + \"_public_ip_\" + suffix\n external_network_id = self.__get_external_network_test_helper__()\n router_id = self.__create_router_test_helper__(router_name, external_network_id)\n\n # Create Network\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 247\n network_id, subnet_id = self.__create_network_and_subnet_test_helper__(network_name, network_cidr)\n\n # Add interface to router\n port_id = self.neutron_operations.add_interface_router(router_id, subnet_id)\n self.test_world['ports'].append(port_id)\n\n # Deploy VM (it will have only one IP from the Public Pool)\n instance_name = TEST_SERVER_PREFIX + \"_public_ip_\" + suffix\n server_id = self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name, is_network_new=False)\n\n # Associate Public IP to Server\n self.nova_operations.add_floating_ip_to_instance(server_id=server_id, ip_address=allocated_ip)", "def database_connectivity():\n\n port = \"mongodb://localhost:27017/\"\n client = pymongo.MongoClient(host=port)\n\n db = client[\"Password_Manager\"]\n collection = db[\"Passwords\"]\n\n return collection", "def open_db_connection():\n client = MongoClient() #'104.131.185.191', 27017\n db = client[\"225VOH\"]\n return client, db", "def prepare_maintenance(self, errors):\n self.cmd_line_opts = self.client['admin'].command('getCmdLineOpts')\n\n if not self.can_restart:\n return\n port = 27017\n specified = False\n repl_index = None\n new_cmd_line = self.cmd_line_opts['argv'][:]\n for i in range(len(new_cmd_line)):\n if new_cmd_line[i] == '--port':\n logging.info(str(new_cmd_line))\n self.maintenance_port = int(new_cmd_line[i+1]) + 20000\n new_cmd_line[i+1] = str(self.maintenance_port)\n specified = True\n if new_cmd_line[i] == '--replSet':\n repl_index = i\n if not specified:\n new_cmd_line.append('--port')\n new_cmd_line.append('47017')\n if repl_index is not None:\n del new_cmd_line[repl_index+1]\n del new_cmd_line[repl_index]\n try:\n self._shutdown()\n except Exception as e:\n errors.put(e)\n traceback.print_exc()\n return\n 
run(\" \".join(new_cmd_line))\n self.client = pymongo.MongoClient(self.host, self.maintenance_port)", "def migrate_volume(self, ctxt, volume, host):\n LOG.debug('Enter: migrate_volume: id=%(id)s, host=%(host)s',\n {'id': volume['id'], 'host': host})\n\n false_ret = (False, None)\n\n if volume['status'] not in ('available', 'retyping'):\n LOG.warning(\"Volume status must be 'available' or 'retyping'.\"\n \" Current volume status: %s\", volume['status'])\n return false_ret\n\n if 'capabilities' not in host:\n LOG.warning(\"Unsupported host. No capabilities found\")\n return false_ret\n\n capabilities = host['capabilities']\n ns_shares = capabilities['ns_shares']\n dst_parts = capabilities['location_info'].split(':')\n dst_host, dst_volume = dst_parts[1:]\n\n if (capabilities.get('vendor_name') != 'Nexenta' or\n dst_parts[0] != self.__class__.__name__ or\n capabilities['free_capacity_gb'] < volume['size']):\n return false_ret\n\n nms = self.share2nms[volume['provider_location']]\n ssh_bindings = nms.appliance.ssh_list_bindings()\n shares = []\n for bind in ssh_bindings:\n for share in ns_shares:\n if (share.startswith(ssh_bindings[bind][3]) and\n ns_shares[share] >= volume['size']):\n shares.append(share)\n if len(shares) == 0:\n LOG.warning(\"Remote NexentaStor appliance at %s should be \"\n \"SSH-bound.\", share)\n return false_ret\n share = sorted(shares, key=ns_shares.get, reverse=True)[0]\n snapshot = {\n 'volume_name': volume['name'],\n 'volume_id': volume['id'],\n 'name': utils.get_migrate_snapshot_name(volume)\n }\n self.create_snapshot(snapshot)\n location = volume['provider_location']\n src = '%(share)s/%(volume)s@%(snapshot)s' % {\n 'share': location.split(':')[1].split('volumes/')[1],\n 'volume': volume['name'],\n 'snapshot': snapshot['name']\n }\n dst = ':'.join([dst_host, dst_volume.split('/volumes/')[1]])\n try:\n nms.appliance.execute(self._get_zfs_send_recv_cmd(src, dst))\n except utils.NexentaException as exc:\n LOG.warning(\"Cannot send source snapshot %(src)s to \"\n \"destination %(dst)s. 
Reason: %(exc)s\",\n {'src': src, 'dst': dst, 'exc': exc})\n return false_ret\n finally:\n try:\n self.delete_snapshot(snapshot)\n except utils.NexentaException as exc:\n LOG.warning(\"Cannot delete temporary source snapshot \"\n \"%(src)s on NexentaStor Appliance: %(exc)s\",\n {'src': src, 'exc': exc})\n try:\n self.delete_volume(volume)\n except utils.NexentaException as exc:\n LOG.warning(\"Cannot delete source volume %(volume)s on \"\n \"NexentaStor Appliance: %(exc)s\",\n {'volume': volume['name'], 'exc': exc})\n\n dst_nms = self._get_nms_for_url(capabilities['nms_url'])\n dst_snapshot = '%s/%s@%s' % (dst_volume.split('volumes/')[1],\n volume['name'], snapshot['name'])\n try:\n dst_nms.snapshot.destroy(dst_snapshot, '')\n except utils.NexentaException as exc:\n LOG.warning(\"Cannot delete temporary destination snapshot \"\n \"%(dst)s on NexentaStor Appliance: %(exc)s\",\n {'dst': dst_snapshot, 'exc': exc})\n return True, {'provider_location': share}", "def upgrade_to_8():\n\n colls = config.db.collection_names()\n to_be_removed = ['version', 'config', 'static']\n # If we are in a bad state (singletons exists but so do any of the colls in to be removed)\n # remove singletons to try again\n if 'singletons' in colls and set(to_be_removed).intersection(set(colls)):\n config.db.drop_collection('singletons')\n\n if 'singletons' not in config.db.collection_names():\n static = config.db.static.find({})\n if static.count() > 0:\n config.db.singletons.insert_many(static)\n config.db.singletons.insert(config.db.version.find({}))\n\n configs = config.db.config.find({'latest': True},{'latest':0})\n if configs.count() == 1:\n c = configs[0]\n c['_id'] = 'config'\n config.db.singletons.insert_one(c)\n\n for c in to_be_removed:\n if c in config.db.collection_names():\n config.db.drop_collection(c)", "def test_replace_host_subnet(self):\n pass", "def test_vm_migration(self):\n self.check_vm_host_after_migration(positive=True)", "def test_vm_migration(self):\n self.check_vm_host_after_migration(positive=True)", "def swap_volume(self, old_connection_info, new_connection_info,\n instance, mountpoint, resize_to):", "def _set_static_ip(name, session, vm_):\n ipv4_cidr = \"\"\n ipv4_gw = \"\"\n if \"ipv4_gw\" in vm_.keys():\n log.debug(\"ipv4_gw is found in keys\")\n ipv4_gw = vm_[\"ipv4_gw\"]\n if \"ipv4_cidr\" in vm_.keys():\n log.debug(\"ipv4_cidr is found in keys\")\n ipv4_cidr = vm_[\"ipv4_cidr\"]\n log.debug(\"attempting to set IP in instance\")\n set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)", "def upgrade():\n op.add_column(\n 'share_instances',\n Column('access_rules_status', String(length=255))\n )\n\n connection = op.get_bind()\n share_instances_table = utils.load_table('share_instances', connection)\n instance_access_table = utils.load_table('share_instance_access_map',\n connection)\n\n # NOTE(u_glide): Data migrations shouldn't be performed on live clouds\n # because it will lead to unpredictable behaviour of running operations\n # like migration.\n instances_query = (\n share_instances_table.select()\n .where(share_instances_table.c.status == constants.STATUS_AVAILABLE)\n .where(share_instances_table.c.deleted == 'False')\n )\n\n for instance in connection.execute(instances_query):\n\n access_mappings_query = instance_access_table.select().where(\n instance_access_table.c.share_instance_id == instance['id']\n ).where(instance_access_table.c.deleted == 'False')\n\n status = constants.STATUS_ACTIVE\n\n for access_rule in connection.execute(access_mappings_query):\n\n if 
(access_rule['state'] == constants.STATUS_DELETING or\n access_rule['state'] not in priorities):\n continue\n\n if priorities[access_rule['state']] > priorities[status]:\n status = access_rule['state']\n\n # pylint: disable=no-value-for-parameter\n op.execute(\n share_instances_table.update().where(\n share_instances_table.c.id == instance['id']\n ).values({'access_rules_status': upgrade_data_mapping[status]})\n )\n\n op.drop_column('share_instance_access_map', 'state')", "def test_migrate_volume(self, volume, volumes_steps):\n old_host, _ = volumes_steps.migrate_volume(volume.name)\n volumes_steps.migrate_volume(volume.name, old_host)", "def disassociate_elastic_ip(ElasticIp=None):\n pass", "def upgrade():\n\n conn = op.get_bind()\n invalid_acr = get_invalid_acrs(conn, models_names)\n\n if invalid_acr:\n invalid_acr_ids = [x.id for x in invalid_acr]\n add_to_objects_without_revisions_bulk(conn,\n invalid_acr_ids,\n acr,\n \"deleted\")\n delete_invalid_acr(conn, models_names)", "def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False):\n return", "def init_nova_network_ips(instance, server):\n ctx = context.ctx()\n\n management_ip = instance.management_ip\n internal_ip = instance.internal_ip\n\n for network_label in server.networks:\n nova_network = nova.client().networks.find(label=network_label)\n network = netaddr.IPNetwork(nova_network.cidr)\n for ip in server.networks[network_label]:\n if netaddr.IPAddress(ip) in network:\n internal_ip = instance.internal_ip or ip\n else:\n management_ip = instance.management_ip or ip\n\n if not CONF.use_floating_ips:\n management_ip = internal_ip\n\n conductor.instance_update(ctx, instance, {\"management_ip\": management_ip,\n \"internal_ip\": internal_ip})\n\n return internal_ip and management_ip", "def clone_collection(self, src_mongodb_uri, src_database, src_collection):\n # drop \"mongodb://\" suffix from uri\n src_conn = src_mongodb_uri[10:]\n if src_conn[-1] == \"/\":\n src_conn = src_conn[:-1]\n self.client.admin.command(\n {\"cloneCollection\": src_database + \".\" + src_collection, \"from\": src_conn}\n )", "def zone_map(zoneadm, passthru='__all__'):\n\n ret = {}\n\n for z in zoneadm:\n chunks = z.split(':')\n\n if len(chunks) < 8:\n raise NotImplementedError(\n 'cannot parse zoneadm output: %d fields in %s' %\n (len(chunks), zoneadm))\n\n if chunks[0] == '-':\n continue\n\n if passthru == '__all__' or chunks[1] in passthru:\n ret[chunks[0]] = chunks[1]\n\n \"\"\"\n Here's a cheat: if we're in an NGZ, we don't actually care about\n the zone ID. In fact, kstat `link` instances *don't* match to\n zone ID in NGZs. 
So, we fudge the key.\n \"\"\"\n\n if len(zoneadm) == 1 and ret.keys()[0] != 'global':\n ret = {'0': ret.values()[0]}\n\n return ret", "def post_instance_ip_create(self, resource_dict):\n pass", "def test_add_autoassigned_ipv4(self):\n with DockerHost('host', dind=False) as host:\n # Test that auto-assiging IPv4 addresses gives what we expect\n workloads = self._setup_env(host, count=2, ip=\"ipv4\")\n\n workloads[0].assert_can_ping(\"192.168.0.1\", retries=3)\n workloads[1].assert_can_ping(\"192.168.0.0\", retries=3)\n\n host.calicoctl(\"container remove {0}\".format(\"workload0\"))\n host.calicoctl(\"container remove {0}\".format(\"workload1\"))\n\n host.remove_workloads()\n\n # Test that recreating returns the next two IPs (IPs are not\n # reassigned automatically unless we have run out of IPs).\n workloads = self._setup_env(host, count=2, ip=\"ipv4\")\n\n workloads[0].assert_can_ping(\"192.168.0.3\", retries=3)\n workloads[1].assert_can_ping(\"192.168.0.2\", retries=3)", "def migrateLXCContainer(self,node,vmid,target):\n post_data = {'target': str(target)}\n data = self.connect('post','nodes/%s/lxc/%s/migrate' % (node,vmid), post_data)\n return data", "def migrate(env, dry_run=False):\n registry = env['registry']\n settings = registry.settings\n readonly_backends = ('storage', 'permission')\n readonly_mode = asbool(settings.get('readonly', False))\n\n for backend in ('cache', 'storage', 'permission'):\n if hasattr(registry, backend):\n if readonly_mode and backend in readonly_backends:\n message = ('Cannot migrate the %s backend while '\n 'in readonly mode.' % backend)\n logger.error(message)\n else:\n getattr(registry, backend).initialize_schema(dry_run=dry_run)", "def save_list_mongo(listz):\t\n\tconnection = pymongo.Connection('localhost', 27017)\n\tdb = connection.database\n\tcollection = db.warez_collection", "def mongodb_drop():\n # Load environment variables\n dotenv_path = find_dotenv()\n load_dotenv(dotenv_path)\n # Connect to the db\n # DB will be created if it doesn't already exist\n client = pymongo.MongoClient(os.environ.get(\"DATABASE_URL\"), 27017)\n client = client.drop_database(\"tweetbase\")", "def testMigrateAnInstanceInAnInstanceGroup(self):\n ### create test resources\n instance_name_1 = 'end-to-end-test-instance-1'\n instance_selfLink_1 = \\\n self.test_resource_creator.create_instance_using_template(\n instance_name_1,\n self.test_resource_creator.legacy_instance_template_selfLink)[\n 'targetLink']\n original_config = self.google_api_interface.get_instance_configs(\n instance_name_1)\n unmanaged_instance_group_name = 'end-to-end-test-unmanaged-instance-group-1'\n self.test_resource_creator.create_unmanaged_instance_group(\n unmanaged_instance_group_name,\n [instance_name_1])\n ### start migration\n selfLink_executor = SelfLinkExecutor(self.compute,\n instance_selfLink_1,\n self.test_resource_creator.network_name,\n self.test_resource_creator.subnetwork_name,\n False)\n\n with self.assertRaises(AmbiguousTargetResource):\n migration_handler = selfLink_executor.build_migration_handler()\n migration_handler.network_migration()\n ### check migration result\n # the migration never starts, so the config didn't change\n new_config = self.google_api_interface.get_instance_configs(\n instance_name_1)\n self.assertEqual(new_config, original_config)\n print('Pass the current test')", "def nocleanup():\n _lbs = kivaloo.servers.Server_lbs()\n _kvlds = kivaloo.servers.Server_kvlds()", "def __init__(self, db_name='leaderboard'):\n key = os.getenv('ATLAS_KEY')\n self.valid = key 
is not None\n self.client = None\n self.database = None\n if self.valid:\n try:\n self.client = pymongo.MongoClient(key % db_name)\n self.database = self.client[db_name]\n except pymongo.errors.ConfigurationError:\n self.valid = False", "def default_zone(self, region):\n if region == 'us-east-1':\n return region + 'b'\n else:\n return region + 'a'" ]
[ "0.5335725", "0.5240647", "0.50483406", "0.5008658", "0.49995932", "0.48313627", "0.4723101", "0.46692348", "0.46243554", "0.45962852", "0.45919207", "0.45395952", "0.45280132", "0.4510447", "0.45066556", "0.44934976", "0.44913507", "0.44898567", "0.44764742", "0.4457461", "0.44557413", "0.44541925", "0.4421727", "0.44142792", "0.440526", "0.44028085", "0.43880656", "0.43643665", "0.43622586", "0.4350899", "0.43500283", "0.43176797", "0.4314166", "0.4307358", "0.43016046", "0.42895672", "0.42685142", "0.4264117", "0.42579377", "0.42577174", "0.4257306", "0.42544925", "0.42529297", "0.42510766", "0.42474964", "0.42468157", "0.42432413", "0.42328435", "0.42310807", "0.42307022", "0.4227295", "0.4224918", "0.4220144", "0.42062026", "0.4206035", "0.4202689", "0.42021394", "0.42020738", "0.41983154", "0.4195918", "0.4195112", "0.4193654", "0.4155689", "0.41508576", "0.41501984", "0.41466358", "0.4142821", "0.4142821", "0.41403562", "0.41390818", "0.41389072", "0.41338167", "0.41274217", "0.4127028", "0.41265878", "0.41250607", "0.41249278", "0.41237944", "0.4123448", "0.4123448", "0.41231844", "0.41222444", "0.41191542", "0.40997574", "0.40965512", "0.40928242", "0.40914294", "0.40878356", "0.40829903", "0.40751633", "0.40744856", "0.40731126", "0.40728652", "0.40710318", "0.40663144", "0.4065537", "0.40637904", "0.40609634", "0.40598074", "0.4057236" ]
0.47672814
6
This operation applies only to replica set instances; it is not supported for standalone instances or sharded cluster instances. > If you have applied for a public endpoint for the instance, you must first call the [ReleasePublicNetworkAddress](~~67604~~) operation to release that endpoint.
def migrate_to_other_zone_with_options( self, request: dds_20151201_models.MigrateToOtherZoneRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.MigrateToOtherZoneResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.effective_time): query['EffectiveTime'] = request.effective_time if not UtilClient.is_unset(request.instance_id): query['InstanceId'] = request.instance_id if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.v_switch_id): query['VSwitchId'] = request.v_switch_id if not UtilClient.is_unset(request.zone_id): query['ZoneId'] = request.zone_id req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='MigrateToOtherZone', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.MigrateToOtherZoneResponse(), self.call_api(params, req, runtime) )
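A minimal calling sketch for the operation above. It assumes the generated client class is exposed as Client in alibabacloud_dds20151201.client, that mongodb.aliyuncs.com is a usable endpoint, and that runtime options come from alibabacloud_tea_util; all IDs, zone names, and credentials below are placeholders rather than values from the source.

# Minimal sketch, assuming the generated client class is Client in
# alibabacloud_dds20151201.client; IDs and credentials are placeholders.
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_dds20151201.client import Client
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util import models as util_models

config = open_api_models.Config(
    access_key_id='<access-key-id>',
    access_key_secret='<access-key-secret>',
    region_id='cn-hangzhou',
    endpoint='mongodb.aliyuncs.com',  # assumed central DDS endpoint; adjust per region
)
client = Client(config)

# Only replica set instances can be migrated across zones; release any public
# endpoint (ReleasePublicNetworkAddress) before issuing this request.
request = dds_20151201_models.MigrateToOtherZoneRequest(
    instance_id='dds-bp1xxxxxxxx',    # placeholder replica set instance ID
    zone_id='cn-hangzhou-h',          # placeholder target zone
    v_switch_id='vsw-bp1xxxxxxxx',    # placeholder vSwitch in the target zone
    effective_time='Immediately',     # assumed value; MaintainTime is the usual alternative
)
response = client.migrate_to_other_zone_with_options(
    request, util_models.RuntimeOptions()
)
print(response.body)

The _with_options variant is called directly here because its signature appears verbatim in the code above; generated SDKs usually also provide a migrate_to_other_zone(request) convenience wrapper that builds the RuntimeOptions internally.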
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def release_address(self, public_ip=None, allocation_id=None):\r\n params = {}\r\n\r\n if public_ip is not None:\r\n params['PublicIp'] = public_ip\r\n elif allocation_id is not None:\r\n params['AllocationId'] = allocation_id\r\n\r\n return self.get_status('ReleaseAddress', params, verb='POST')", "def release_eip_address(\n public_ip=None, allocation_id=None, region=None, key=None, keyid=None, profile=None\n):\n if not salt.utils.data.exactly_one((public_ip, allocation_id)):\n raise SaltInvocationError(\n \"Exactly one of 'public_ip' OR 'allocation_id' must be provided\"\n )\n\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n try:\n return conn.release_address(public_ip, allocation_id)\n except boto.exception.BotoServerError as e:\n log.error(e)\n return False", "def test_deploy_instance_with_new_network(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 252\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr)", "def disassociate_elastic_ip(ElasticIp=None):\n pass", "def release_node_private_network_address_with_options(\n self,\n request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.network_type):\n query['NetworkType'] = request.network_type\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ReleaseNodePrivateNetworkAddress',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse(),\n self.call_api(params, req, runtime)\n )", "def test_replace_host_subnet(self):\n pass", "def post_instance_ip_delete(self, resource_id, resource_dict):\n pass", "def remove_fixed_ip_from_instance(self, context, instance_id, address):\n args = {'instance_id': instance_id,\n 'address': address}\n rpc.cast(context, FLAGS.network_topic,\n {'method': 'remove_fixed_ip_from_instance',\n 'args': args})", "def release_node_private_network_address(\n self,\n request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,\n ) -> dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:\n runtime = util_models.RuntimeOptions()\n return self.release_node_private_network_address_with_options(request, runtime)", "async def release_node_private_network_address_with_options_async(\n 
self,\n request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.network_type):\n query['NetworkType'] = request.network_type\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ReleaseNodePrivateNetworkAddress',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def test_patch_host_subnet(self):\n pass", "def test_deploy_instance_with_networks_and_e2e_connection_using_public_ip(self):\n\n # skip test if suite couldn't start from an empty, clean list of allocated IPs (to avoid cascading failures)\n if self.suite_world['allocated_ips']:\n self.skipTest(\"There were pre-existing, not deallocated IPs\")\n\n # Allocate an IP\n allocated_ip = self.__allocate_ip_test_helper__()\n\n # Create Keypair\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n keypair_name = TEST_KEYPAIR_PREFIX + \"_\" + suffix\n private_keypair_value = self.__create_keypair_test_helper__(keypair_name)\n\n # Create Router with an external network gateway\n router_name = TEST_ROUTER_PREFIX + \"_e2e_\" + suffix\n external_network_id = self.__get_external_network_test_helper__()\n router_id = self.__create_router_test_helper__(router_name, external_network_id)\n\n # Create Network\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 246\n network_id, subnet_id = self.__create_network_and_subnet_test_helper__(network_name, network_cidr)\n\n # Add interface to router\n port_id = self.neutron_operations.add_interface_router(router_id, subnet_id)\n self.test_world['ports'].append(port_id)\n\n # Deploy VM (it will have only one IP from the Public Pool)\n instance_name = TEST_SERVER_PREFIX + \"_e2e_\" + suffix\n keypair_name = TEST_KEYPAIR_PREFIX + \"_\" + suffix\n sec_group_name = TEST_SEC_GROUP_PREFIX + \"_\" + suffix\n server_id = self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name, is_network_new=False,\n keypair_name=keypair_name, is_keypair_new=False,\n sec_group_name=sec_group_name)\n\n # Associate the public IP to Server\n self.nova_operations.add_floating_ip_to_instance(server_id=server_id, ip_address=allocated_ip)\n\n # SSH Connection\n self.__ssh_connection_test_helper__(host=allocated_ip, private_key=private_keypair_value)", "def test_delete_host_subnet(self):\n pass", "def 
test_deploy_instance_with_network_and_associate_public_ip(self):\n\n # skip test if suite couldn't start from an empty, clean list of allocated IPs (to avoid cascading failures)\n if self.suite_world['allocated_ips']:\n self.skipTest(\"There were pre-existing, not deallocated IPs\")\n\n # Allocate IP\n allocated_ip = self.__allocate_ip_test_helper__()\n\n # Create Router with an external network gateway\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n router_name = TEST_ROUTER_PREFIX + \"_public_ip_\" + suffix\n external_network_id = self.__get_external_network_test_helper__()\n router_id = self.__create_router_test_helper__(router_name, external_network_id)\n\n # Create Network\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 247\n network_id, subnet_id = self.__create_network_and_subnet_test_helper__(network_name, network_cidr)\n\n # Add interface to router\n port_id = self.neutron_operations.add_interface_router(router_id, subnet_id)\n self.test_world['ports'].append(port_id)\n\n # Deploy VM (it will have only one IP from the Public Pool)\n instance_name = TEST_SERVER_PREFIX + \"_public_ip_\" + suffix\n server_id = self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name, is_network_new=False)\n\n # Associate Public IP to Server\n self.nova_operations.add_floating_ip_to_instance(server_id=server_id, ip_address=allocated_ip)", "def test_delete_collection_host_subnet(self):\n pass", "def release_port_fixed_ip(self, network_id, device_id, subnet_id):\n return self.call(self.context,\n self.make_msg('release_port_fixed_ip',\n network_id=network_id,\n subnet_id=subnet_id,\n device_id=device_id,\n host=self.host),\n topic=self.topic)", "def test_deploy_instance_with_new_network_and_keypair(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_keypair_\" + suffix\n keypair_name = TEST_KEYPAIR_PREFIX + \"_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 250\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr,\n keypair_name=keypair_name)", "def deregister_elastic_ip(ElasticIp=None):\n pass", "def post_network_ipam_delete(self, resource_id, resource_dict):\n pass", "def subnet_update(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.update_subnet(**kwargs)", "def disassociate(self, endpoint_name=None, instance_id=None):\n if instance_id is None:\n raise Exception(\"Instance required!\")\n if endpoint_name is None:\n self.request('/v1.1/endpoint/instances/%s' % instance_id, 'DELETE')\n else:\n self.request('/v1.1/endpoints/%s/instances/%s' %\n (endpoint_name, instance_id), 'DELETE')", "def remove_gateway(self, network_ref):\n raise NotImplementedError()", "def post_instance_ip_create(self, resource_dict):\n pass", "def remove_ipv4_address(self, net_interface, address):\n self._runner.run('ip addr del %s dev %s' % (address, net_interface))", "def disassociate_resolver_endpoint_ip_address(ResolverEndpointId=None, IpAddress=None):\n pass", "def _post_task_update_advertise_address():\n default_network_interface = None\n\n with open(KUBE_APISERVER_CONFIG) as f:\n lines = f.read()\n m = re.search(REGEXPR_ADVERTISE_ADDRESS, lines)\n if m:\n default_network_interface = m.group(1)\n LOG.debug(' default_network_interface = %s', default_network_interface)\n\n if advertise_address and 
default_network_interface \\\n and advertise_address != default_network_interface:\n cmd = [\"sed\", \"-i\", \"/oidc-issuer-url/! s/{}/{}/g\".format(default_network_interface, advertise_address),\n KUBE_APISERVER_CONFIG]\n _ = _exec_cmd(cmd)", "def test_create_host_subnet(self):\n pass", "def change_address(\n vm_hostname, new_address,\n offline=False, migrate=False, allow_reserved_hv=False,\n offline_transport='drbd',\n):\n\n if not offline:\n raise IGVMError('IP address change can be only performed offline')\n\n with _get_vm(vm_hostname) as vm:\n if vm.dataset_obj['datacenter_type'] != 'kvm.dct':\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n new_address = ip_address(new_address)\n\n if vm.dataset_obj['intern_ip'] == new_address:\n raise ConfigError('New IP address is the same as the old one!')\n\n if not vm.hypervisor.get_vlan_network(new_address) and not migrate:\n err = 'Current hypervisor does not support new subnet!'\n raise ConfigError(err)\n\n new_network = Query(\n {\n 'servertype': 'route_network',\n 'state': 'online',\n 'network_type': 'internal',\n 'intern_ip': Contains(new_address),\n }\n ).get()['hostname']\n\n vm_was_running = vm.is_running()\n\n with Transaction() as transaction:\n if vm_was_running:\n vm.shutdown(\n transaction=transaction,\n check_vm_up_on_transaction=False,\n )\n vm.change_address(\n new_address, new_network, transaction=transaction,\n )\n\n if migrate:\n vm_migrate(\n vm_object=vm,\n run_puppet=True,\n offline=True,\n no_shutdown=True,\n allow_reserved_hv=allow_reserved_hv,\n offline_transport=offline_transport,\n )\n else:\n vm.hypervisor.mount_vm_storage(vm, transaction=transaction)\n vm.run_puppet()\n vm.hypervisor.redefine_vm(vm)\n vm.hypervisor.umount_vm_storage(vm)\n\n if vm_was_running:\n vm.start()", "def network_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_network(**kwargs)", "def test_07_associate_public_ip(self):\n # Validate the following\n # 1. Create a project\n # 2. Add some public Ips to the project\n # 3. 
Verify public IP assigned can only used to create PF/LB rules\n # inside project\n\n networks = Network.list(\n self.apiclient,\n projectid=self.project.id,\n listall=True\n )\n self.assertEqual(\n isinstance(networks, list),\n True,\n \"Check list networks response returns a valid response\"\n )\n self.assertNotEqual(\n len(networks),\n 0,\n \"Check list networks response returns a valid network\"\n )\n network = networks[0]\n self.debug(\"Associating public IP for project: %s\" % self.project.id)\n public_ip = PublicIPAddress.create(\n self.apiclient,\n zoneid=self.virtual_machine.zoneid,\n services=self.services[\"server\"],\n networkid=network.id,\n projectid=self.project.id\n )\n self.cleanup.append(public_ip)\n\n #Create NAT rule\n self.debug(\n \"Creating a NAT rule within project, VM ID: %s\" %\n self.virtual_machine.id)\n nat_rule = NATRule.create(\n self.apiclient,\n self.virtual_machine,\n self.services[\"natrule\"],\n public_ip.ipaddress.id,\n projectid=self.project.id\n )\n self.debug(\"created a NAT rule with ID: %s\" % nat_rule.id)\n nat_rule_response = NATRule.list(\n self.apiclient,\n id=nat_rule.id\n )\n self.assertEqual(\n isinstance(nat_rule_response, list),\n True,\n \"Check list response returns a valid list\"\n )\n self.assertNotEqual(\n len(nat_rule_response),\n 0,\n \"Check Port Forwarding Rule is created\"\n )\n self.assertEqual(\n nat_rule_response[0].id,\n nat_rule.id,\n \"Check Correct Port forwarding Rule is returned\"\n )\n\n #Create Load Balancer rule and assign VMs to rule\n self.debug(\"Created LB rule for public IP: %s\" %\n public_ip.ipaddress)\n lb_rule = LoadBalancerRule.create(\n self.apiclient,\n self.services[\"lbrule\"],\n public_ip.ipaddress.id,\n projectid=self.project.id\n )\n self.debug(\"Assigning VM: %s to LB rule: %s\" % (\n self.virtual_machine.name,\n lb_rule.id\n ))\n lb_rule.assign(self.apiclient, [self.virtual_machine])\n\n lb_rules = list_lb_rules(\n self.apiclient,\n id=lb_rule.id\n )\n self.assertEqual(\n isinstance(lb_rules, list),\n True,\n \"Check list response returns a valid list\"\n )\n #verify listLoadBalancerRules lists the added load balancing rule\n self.assertNotEqual(\n len(lb_rules),\n 0,\n \"Check Load Balancer Rule in its List\"\n )\n self.assertEqual(\n lb_rules[0].id,\n lb_rule.id,\n \"Check List Load Balancer Rules returns valid Rule\"\n )\n\n #Create Firewall rule with configurations from settings file\n fw_rule = FireWallRule.create(\n self.apiclient,\n ipaddressid=public_ip.ipaddress.id,\n protocol='TCP',\n cidrlist=[self.services[\"fw_rule\"][\"cidr\"]],\n startport=self.services[\"fw_rule\"][\"startport\"],\n endport=self.services[\"fw_rule\"][\"endport\"],\n projectid=self.project.id\n )\n self.debug(\"Created firewall rule: %s\" % fw_rule.id)\n\n # After Router start, FW rule should be in Active state\n fw_rules = FireWallRule.list(\n self.apiclient,\n id=fw_rule.id,\n )\n self.assertEqual(\n isinstance(fw_rules, list),\n True,\n \"Check for list FW rules response return valid data\"\n )\n\n self.assertEqual(\n fw_rules[0].state,\n 'Active',\n \"Check list load balancing rules\"\n )\n self.assertEqual(\n fw_rules[0].startport,\n self.services[\"fw_rule\"][\"startport\"],\n \"Check start port of firewall rule\"\n )\n\n self.assertEqual(\n fw_rules[0].endport,\n self.services[\"fw_rule\"][\"endport\"],\n \"Check end port of firewall rule\"\n )\n\n self.debug(\"Deploying VM for account: %s\" % self.account.name)\n virtual_machine_1 = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n 
templateid=self.template.id,\n accountid=self.account.name,\n domainid=self.account.domainid,\n serviceofferingid=self.service_offering.id,\n )\n self.cleanup.append(virtual_machine_1)\n\n self.debug(\"VM state after deploy: %s\" % virtual_machine_1.state)\n # Verify VM state\n self.assertEqual(\n virtual_machine_1.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n self.debug(\"Creating NAT rule for VM (ID: %s) outside project\" %\n virtual_machine_1.id)\n with self.assertRaises(Exception):\n NATRule.create(\n self.apiclient,\n virtual_machine_1,\n self.services[\"natrule\"],\n public_ip.ipaddress.id,\n )\n\n self.debug(\"Creating LB rule for public IP: %s outside project\" %\n public_ip.ipaddress)\n with self.assertRaises(Exception):\n LoadBalancerRule.create(\n self.apiclient,\n self.services[\"lbrule\"],\n public_ip.ipaddress.id,\n accountid=self.account.name\n )\n return", "def subnet(template, name, vpc, availability_zone='eu-west-1a', cidr='10.0.36.0/24', gateway=None, nat=None,\n map_public_ip=False, acl_table=None):\n s = Subnet(name, template=template)\n s.Tags = Tags(Name=aws_name(s.title))\n s.VpcId = Ref(vpc)\n s.CidrBlock = cidr\n s.MapPublicIpOnLaunch = map_public_ip\n\n if availability_zone:\n s.AvailabilityZone = Ref(availability_zone)\n\n if gateway and nat:\n raise(RuntimeError(\"Don't provide an internet gateway (public) and nat gateway (private) at the same time.\"))\n\n # add public route if an internet gateway is given\n if gateway:\n # route table\n rt = RouteTable('{}RouteTable'.format(name), template=template)\n rt.Tags = Tags(Name=aws_name(rt.title))\n rt.VpcId = Ref(vpc)\n\n # route\n r = Route('{}Route'.format(name), template=template)\n r.DestinationCidrBlock = '0.0.0.0/0'\n r.GatewayId = Ref(gateway)\n # r.DependsOn = InternetGatewayAttachment.title\n r.RouteTableId = Ref(rt)\n\n # associate\n SubnetRouteTableAssociation('{}SubnetRouteTableAssociation'.format(name), template=template,\n RouteTableId=Ref(rt), SubnetId=Ref(s))\n\n # add nat route if an nat gateway is given\n if nat:\n # route table\n rt = RouteTable('{}RouteTable'.format(name), template=template)\n rt.Tags = Tags(Name=aws_name(rt.title))\n rt.VpcId = Ref(vpc)\n\n # route\n r = Route('{}Route'.format(name), template=template)\n r.DestinationCidrBlock = '0.0.0.0/0'\n r.NatGatewayId = Ref(nat)\n # r.DependsOn = InternetGatewayAttachment.title\n r.RouteTableId = Ref(rt)\n\n # associate\n SubnetRouteTableAssociation('{}SubnetRouteTableAssociation'.format(name), template=template,\n RouteTableId=Ref(rt), SubnetId=Ref(s))\n\n # add acl table if one is provided. 
Defaults to vpc default acl if None is provided\n if acl_table:\n at = SubnetNetworkAclAssociation('{}SubnetAclTableAssociation'.format(name), template=template)\n at.SubnetId = Ref(s)\n at.NetworkAclId = Ref(acl_table)\n\n return s", "def create_network(client, overwrite_net=False, network_name=DOCK_NETWORK_NAME, subnetwork=DOCK_NETWORK_SUBNET,\n gw=DOCK_NETWORK_GW):\n\n if overwrite_net:\n try:\n client.networks.get(network_name).remove()\n logging.info(\" Overwriting existing network\")\n except docker.errors.APIError:\n logging.info(\" Warning: Couldn't find network to overwrite (does it exist?)\")\n\n ipam_pool = docker.types.IPAMPool(subnet=subnetwork, gateway=gw)\n ipam_config = docker.types.IPAMConfig(pool_configs=[ipam_pool])\n client.networks.create(network_name, driver=\"bridge\", ipam=ipam_config)", "def post_delete_subnet(self, sender, instance, **kwargs):\n RecurseNetworks.delete_entries(subnet=str(instance.ip_network), net_name=instance.name)", "def test_deploy_instance_with_new_network_and_sec_group(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_sec_group_\" + suffix\n sec_group_name = TEST_SEC_GROUP_PREFIX + \"_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 249\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr,\n sec_group_name=sec_group_name)", "def remove_ip(enode, portlbl, addr, shell=None):\n assert portlbl\n assert ip_interface(addr)\n port = enode.ports[portlbl]\n\n cmd = 'ip addr del {addr} dev {port}'.format(addr=addr, port=port)\n response = enode(cmd, shell=shell)\n assert not response", "def reset_network(self, instance):\n LOG.debug(\"reset_network\")\n return", "def post_instance_ip_update(self, resource_id, resource_dict):\n pass", "def post_virtual_network_delete(self, resource_id, resource_dict):\n pass", "def migrate_contract(network):\n print(network)", "def update_network(**kwargs):\n\n ip_addr = kwargs.get('ip_addr')\n is_private = kwargs.get('is_private')\n name = kwargs.get('name')\n dns_names = kwargs.get('dns_names')\n is_scanning = kwargs.get('is_scanning', False)\n network_id = make_shortuuid(name)\n\n network = {\n 'dns_names': dns_names,\n 'ip_addr': ip_addr,\n 'is_private' : is_private,\n 'name': name,\n 'id': network_id,\n 'is_scanning': is_scanning,\n 'updated_count': 0\n\n }\n\n network_exists = r.table(\"networks\").insert([network], conflict=\"update\")\n\n return network_exists.run(conn)", "def detach_public_ip(self, name=None, ip=None):\n raise NotImplementedError", "def sgup(sg=\"sg_external_ssh\"):\n ip = os.popen(\"/usr/bin/curl ifconfig.co 2>/dev/null\").readline().strip()\n print(\"My Public IP is : \"+ip)\n client = boto3.client(\"ec2\")\n ippermissions = client.describe_security_groups(GroupNames = [ sg ])[\"SecurityGroups\"][0][\"IpPermissions\"]\n print(\"Revoking old IP from group \"+sg)\n client.revoke_security_group_ingress(GroupName = sg, IpPermissions = ippermissions)\n printr(\"Adding new IP to group \"+sg)\n client.authorize_security_group_ingress(GroupName=sg, IpProtocol=\"-1\", FromPort=0, ToPort=0, CidrIp=ip+\"/32\")", "def delete_public_ip(self, ip=None):\n raise NotImplementedError", "def post_virtual_ip_delete(self, resource_id, resource_dict):\n pass", "def add_fixed_ip_to_instance(self, context, instance_id, host, network_id):\n args = {'instance_id': instance_id,\n 'host': host,\n 'network_id': network_id}\n rpc.cast(context, 
FLAGS.network_topic,\n {'method': 'add_fixed_ip_to_instance',\n 'args': args})", "def post_subnet_delete(self, resource_id, resource_dict):\n pass", "def subnet_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_subnet(**kwargs)", "def deallocate_for_instance(self, context, instance, **kwargs):\n args = kwargs\n args['instance_id'] = instance['id']\n args['project_id'] = instance['project_id']\n rpc.cast(context, FLAGS.network_topic,\n {'method': 'deallocate_for_instance',\n 'args': args})", "def test_vpc_to_subnets(neo4j_session):\n _ensure_local_neo4j_has_test_vpc_data(neo4j_session)\n _ensure_local_neo4j_has_test_subnet_data(neo4j_session)\n query = \"\"\"\n MATCH(vpc:GCPVpc{id:$VpcId})-[:RESOURCE]->(subnet:GCPSubnet)\n RETURN vpc.id, subnet.id, subnet.region, subnet.gateway_address, subnet.ip_cidr_range,\n subnet.private_ip_google_access\n \"\"\"\n expected_vpc_id = 'projects/project-abc/global/networks/default'\n nodes = neo4j_session.run(\n query,\n VpcId=expected_vpc_id,\n )\n actual_nodes = {\n (\n n['vpc.id'],\n n['subnet.id'],\n n['subnet.region'],\n n['subnet.gateway_address'],\n n['subnet.ip_cidr_range'],\n n['subnet.private_ip_google_access'],\n ) for n in nodes\n }\n\n expected_nodes = {\n (\n 'projects/project-abc/global/networks/default',\n 'projects/project-abc/regions/europe-west2/subnetworks/default',\n 'europe-west2',\n '10.0.0.1',\n '10.0.0.0/20',\n False,\n ),\n }\n assert actual_nodes == expected_nodes", "def test_nic_to_subnets(neo4j_session):\n _ensure_local_neo4j_has_test_subnet_data(neo4j_session)\n _ensure_local_neo4j_has_test_instance_data(neo4j_session)\n subnet_query = \"\"\"\n MATCH (nic:GCPNetworkInterface{id:$NicId})-[:PART_OF_SUBNET]->(subnet:GCPSubnet)\n return nic.nic_id, nic.private_ip, subnet.id, subnet.gateway_address, subnet.ip_cidr_range\n \"\"\"\n nodes = neo4j_session.run(\n subnet_query,\n NicId='projects/project-abc/zones/europe-west2-b/instances/instance-1-test/networkinterfaces/nic0',\n )\n actual_nodes = {\n (\n n['nic.nic_id'],\n n['nic.private_ip'],\n n['subnet.id'],\n n['subnet.gateway_address'],\n n['subnet.ip_cidr_range'],\n ) for n in nodes\n }\n expected_nodes = {(\n 'projects/project-abc/zones/europe-west2-b/instances/instance-1-test/networkinterfaces/nic0',\n '10.0.0.3',\n 'projects/project-abc/regions/europe-west2/subnetworks/default',\n '10.0.0.1',\n '10.0.0.0/20',\n )}\n assert actual_nodes == expected_nodes", "def test_deploy_instance_with_new_network_and_metadata(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_metadata_\" + suffix\n instance_meta = {\"test_item\": \"test_value\"}\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 251\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr,\n metadata=instance_meta)", "def test_patch_namespaced_egress_network_policy(self):\n pass", "def amazon_public_address() -> str:\n check_url = 'http://169.254.169.254/latest/meta-data/public-ipv4'\n check_timeout = float(CONFIG['network']['check_timeout'])\n try:\n with urllib.request.urlopen(\n url=check_url, timeout=check_timeout,\n ) as response:\n return response.read().decode().strip()\n except Exception as error:\n return None", "async def release_node_private_network_address_async(\n self,\n request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,\n ) -> 
dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:\n runtime = util_models.RuntimeOptions()\n return await self.release_node_private_network_address_with_options_async(request, runtime)", "def pre_instance_ip_delete(self, resource_id):\n pass", "def create_network(\n self, is_internal: bool = True\n ) -> None:\n if self.network:\n self.log.warn(f\"Network {self.network_name} was already created!\")\n return\n\n existing_networks = self.docker.networks.list(\n names=[self.network_name]\n )\n if existing_networks:\n if len(existing_networks) > 1:\n self.log.error(\n f\"Found multiple ({len(existing_networks)}) existing \"\n f\"networks {self.network_name}. Please delete all or all \"\n \"but one before starting the server!\")\n exit(1)\n self.log.info(f\"Network {self.network_name} already exists! Using \"\n \"existing network\")\n self.network = existing_networks[0]\n self.network.reload() # required to initialize containers in netw\n else:\n self.network = self.docker.networks.create(\n self.network_name,\n driver=\"bridge\",\n internal=is_internal,\n scope=\"local\",\n )", "def disassociate_address(self, public_ip=None, association_id=None):\r\n params = {}\r\n\r\n if public_ip is not None:\r\n params['PublicIp'] = public_ip\r\n elif association_id is not None:\r\n params['AssociationId'] = association_id\r\n\r\n return self.get_status('DisassociateAddress', params, verb='POST')", "def create_network(region_name, vpc_cidr, tag_prefix,\n tls_priv_key=None, tls_fullchain_cert=None,\n ssh_key_name=None, ssh_key_content=None, sally_ip=None,\n s3_logs_bucket=None, s3_identities_bucket=None,\n storage_enckey=None,\n dry_run=False):\n sg_tag_prefix = tag_prefix\n\n LOGGER.info(\"Provisions network ...\")\n ec2_client = boto3.client('ec2', region_name=region_name)\n\n # Create a VPC\n vpc_id, vpc_cidr_read = _get_vpc_id(tag_prefix, ec2_client=ec2_client,\n region_name=region_name)\n if vpc_id:\n if vpc_cidr != vpc_cidr_read:\n raise RuntimeError(\n \"%s cidr block for VPC is %s while it was expected to be %s\" %\n (tag_prefix, vpc_cidr_read, vpc_cidr))\n else:\n if not vpc_cidr:\n raise RuntimeError(\n \"%s could not find VPC and no cidr block is specified\"\\\n \" to create one.\" % tag_prefix)\n resp = ec2_client.create_vpc(\n DryRun=dry_run,\n CidrBlock=vpc_cidr,\n AmazonProvidedIpv6CidrBlock=False,\n InstanceTenancy='default')\n vpc_id = resp['Vpc']['VpcId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[vpc_id],\n Tags=[\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\", 'Value': \"%s-vpc\" % tag_prefix}])\n LOGGER.info(\"%s created VPC %s\", tag_prefix, vpc_id)\n\n # Create subnets for app, dbs and web services\n # ELB will require that there is at least one subnet per availability zones.\n # RDS will require that there is at least two subnets for databases.\n resp = ec2_client.describe_availability_zones()\n zones = {(zone['ZoneId'], zone['ZoneName'])\n for zone in resp['AvailabilityZones']}\n web_subnet_cidrs, dbs_subnet_cidrs, app_subnet_cidrs = _split_cidrs(\n vpc_cidr, zones=zones, region_name=region_name)\n\n LOGGER.info(\"%s provisioning web subnets...\", tag_prefix)\n web_zones = set([])\n web_subnet_by_cidrs = _get_subnet_by_cidrs(\n web_subnet_cidrs, tag_prefix, vpc_id=vpc_id, ec2_client=ec2_client)\n for cidr_block, subnet in web_subnet_by_cidrs.items():\n if subnet:\n web_zones |= {\n (subnet['AvailabilityZoneId'], subnet['AvailabilityZone'])}\n for cidr_block, subnet in web_subnet_by_cidrs.items():\n if not subnet:\n available_zones = zones - 
web_zones\n zone_id, zone_name = available_zones.pop()\n try:\n resp = ec2_client.create_subnet(\n AvailabilityZoneId=zone_id,\n CidrBlock=cidr_block,\n VpcId=vpc_id,\n TagSpecifications=[{\n 'ResourceType': 'subnet',\n 'Tags': [\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s %s web\" % (tag_prefix, zone_name)}]}],\n DryRun=dry_run)\n subnet = resp['Subnet']\n web_subnet_by_cidrs[cidr_block] = subnet\n web_zones |= set([(zone_id, zone_name)])\n subnet_id = subnet['SubnetId']\n LOGGER.info(\"%s created subnet %s in zone %s for cidr %s\",\n tag_prefix, subnet_id, zone_name, cidr_block)\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidSubnet.Conflict':\n raise\n # We have a conflict, let's just skip over it.\n LOGGER.warning(\n \"%s (skip) created subnet in zone %s because '%s'\",\n tag_prefix, zone_name, err)\n if subnet and not subnet['MapPublicIpOnLaunch']:\n subnet_id = subnet['SubnetId']\n if not dry_run:\n resp = ec2_client.modify_subnet_attribute(\n SubnetId=subnet_id,\n MapPublicIpOnLaunch={'Value': True})\n LOGGER.info(\"%s modify web subnet %s so instance can receive\"\\\n \" a public IP by default\", tag_prefix, subnet_id)\n\n LOGGER.info(\"%s provisioning dbs subnets...\", tag_prefix)\n dbs_zones = set([])\n dbs_subnet_by_cidrs = _get_subnet_by_cidrs(\n dbs_subnet_cidrs, tag_prefix, vpc_id=vpc_id, ec2_client=ec2_client)\n for cidr_block, subnet in dbs_subnet_by_cidrs.items():\n if subnet:\n dbs_zones |= {\n (subnet['AvailabilityZoneId'], subnet['AvailabilityZone'])}\n for cidr_block, subnet in dbs_subnet_by_cidrs.items():\n if not subnet:\n available_zones = zones - dbs_zones\n zone_id, zone_name = available_zones.pop()\n resp = ec2_client.create_subnet(\n AvailabilityZoneId=zone_id,\n CidrBlock=cidr_block,\n VpcId=vpc_id,\n TagSpecifications=[{\n 'ResourceType': 'subnet',\n 'Tags': [\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s %s dbs\" % (tag_prefix, zone_name)}]}],\n DryRun=dry_run)\n subnet = resp['Subnet']\n dbs_subnet_by_cidrs[cidr_block] = subnet\n dbs_zones |= set([(zone_id, zone_name)])\n subnet_id = subnet['SubnetId']\n LOGGER.info(\"%s created subnet %s in zone %s for cidr %s\",\n tag_prefix, subnet_id, zone_name, cidr_block)\n if subnet['MapPublicIpOnLaunch']:\n subnet_id = subnet['SubnetId']\n if not dry_run:\n resp = ec2_client.modify_subnet_attribute(\n SubnetId=subnet_id,\n MapPublicIpOnLaunch={'Value': False})\n LOGGER.info(\"%s modify dbs subnet %s so instance do not receive\"\\\n \" a public IP by default\", tag_prefix, subnet_id)\n\n LOGGER.info(\"%s provisioning apps subnets...\", tag_prefix)\n app_zones = set([])\n app_subnet_by_cidrs = _get_subnet_by_cidrs(\n app_subnet_cidrs, tag_prefix, vpc_id=vpc_id, ec2_client=ec2_client)\n for cidr_block, subnet in app_subnet_by_cidrs.items():\n if subnet:\n app_zones |= {\n (subnet['AvailabilityZoneId'], subnet['AvailabilityZone'])}\n for cidr_block, subnet in app_subnet_by_cidrs.items():\n if not subnet:\n available_zones = zones - app_zones\n zone_id, zone_name = available_zones.pop()\n resp = ec2_client.create_subnet(\n AvailabilityZoneId=zone_id,\n CidrBlock=cidr_block,\n VpcId=vpc_id,\n # COMMIT MSG:\n # this requires boto3>=1.14, using `createTag` might fail\n # because the subnet is not fully created yet.\n TagSpecifications=[{\n 'ResourceType': 'subnet',\n 'Tags': [\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s %s app\" % (tag_prefix, 
zone_name)}]}],\n DryRun=dry_run)\n subnet = resp['Subnet']\n app_subnet_by_cidrs[cidr_block] = subnet\n app_zones |= set([(zone_id, zone_name)])\n subnet_id = subnet['SubnetId']\n LOGGER.info(\"%s created subnet %s in %s for cidr %s\",\n tag_prefix, subnet_id, zone_name, cidr_block)\n if subnet['MapPublicIpOnLaunch']:\n subnet_id = subnet['SubnetId']\n if not dry_run:\n resp = ec2_client.modify_subnet_attribute(\n SubnetId=subnet_id,\n MapPublicIpOnLaunch={'Value': False})\n LOGGER.info(\"%s modify app subnet %s so instance do not receive\"\\\n \" a public IP by default\", tag_prefix, subnet_id)\n\n # Ensure that the VPC has an Internet Gateway.\n resp = ec2_client.describe_internet_gateways(\n Filters=[{'Name': 'attachment.vpc-id', 'Values': [vpc_id]}])\n if resp['InternetGateways']:\n igw_id = resp['InternetGateways'][0]['InternetGatewayId']\n LOGGER.info(\"%s found Internet Gateway %s\", tag_prefix, igw_id)\n else:\n resp = ec2_client.describe_internet_gateways(\n Filters=[{'Name': 'tag:Prefix', 'Values': [tag_prefix]}])\n if resp['InternetGateways']:\n igw_id = resp['InternetGateways'][0]['InternetGatewayId']\n LOGGER.info(\"%s found Internet Gateway %s\", tag_prefix, igw_id)\n else:\n resp = ec2_client.create_internet_gateway(DryRun=dry_run)\n igw_id = resp['InternetGateway']['InternetGatewayId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[igw_id],\n Tags=[{'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s internet gateway\" % tag_prefix}])\n LOGGER.info(\"%s created Internet Gateway %s\", tag_prefix, igw_id)\n resp = ec2_client.attach_internet_gateway(\n DryRun=dry_run,\n InternetGatewayId=igw_id,\n VpcId=vpc_id)\n\n # Create the NAT gateway by which private subnets connect to Internet\n # XXX Why do we have a Network interface eni-****?\n nat_elastic_ip = None\n web_elastic_ip = None\n resp = ec2_client.describe_addresses(\n Filters=[{'Name': 'tag:Prefix', 'Values': [tag_prefix]}])\n if resp['Addresses']:\n for resp_address in resp['Addresses']:\n for resp_tag in resp_address['Tags']:\n if resp_tag['Key'] == 'Name':\n if 'NAT gateway' in resp_tag['Value']:\n nat_elastic_ip = resp_address['AllocationId']\n break\n if 'Sally' in resp_tag['Value']:\n web_elastic_ip = resp_address['AllocationId']\n break\n\n if nat_elastic_ip:\n LOGGER.info(\"%s found NAT gateway public IP %s\",\n tag_prefix, nat_elastic_ip)\n else:\n resp = ec2_client.allocate_address(\n DryRun=dry_run,\n Domain='vpc')\n nat_elastic_ip = resp['AllocationId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[nat_elastic_ip],\n Tags=[{'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s NAT gateway public IP\" % tag_prefix}])\n LOGGER.info(\"%s created NAT gateway public IP %s\",\n tag_prefix, nat_elastic_ip)\n if web_elastic_ip:\n LOGGER.info(\"%s found Sally public IP %s\",\n tag_prefix, web_elastic_ip)\n else:\n resp = ec2_client.allocate_address(\n DryRun=dry_run,\n Domain='vpc')\n web_elastic_ip = resp['AllocationId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[web_elastic_ip],\n Tags=[{'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s Sally public IP\" % tag_prefix}])\n LOGGER.info(\"%s created Sally public IP %s\",\n tag_prefix, web_elastic_ip)\n\n # We have 2 EIP addresses. 
They need to be connected to machines\n # running in an Internet facing subnet.\n client_token = tag_prefix\n # XXX shouldn't it be the first web subnet instead?\n resp = ec2_client.describe_nat_gateways(Filters=[\n {'Name': \"vpc-id\", 'Values': [vpc_id]},\n {'Name': \"state\", 'Values': ['pending', 'available']}])\n if resp['NatGateways']:\n if len(resp['NatGateways']) > 1:\n LOGGER.warning(\"%s found more than one NAT gateway.\"\\\n \" Using first one in the list.\", tag_prefix)\n nat_gateway = resp['NatGateways'][0]\n nat_gateway_id = nat_gateway['NatGatewayId']\n nat_gateway_subnet_id = nat_gateway['SubnetId']\n LOGGER.info(\"%s found NAT gateway %s\", tag_prefix, nat_gateway_id)\n else:\n nat_gateway_subnet_id = next(web_subnet_by_cidrs.values())['SubnetId']\n resp = ec2_client.create_nat_gateway(\n AllocationId=nat_elastic_ip,\n ClientToken=client_token,\n SubnetId=nat_gateway_subnet_id)\n nat_gateway_id = resp['NatGateway']['NatGatewayId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[nat_gateway_id],\n Tags=[{'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s NAT gateway\" % tag_prefix}])\n LOGGER.info(\"%s created NAT gateway %s\",\n tag_prefix, nat_gateway_id)\n\n # Set up public and NAT-protected route tables\n resp = ec2_client.describe_route_tables(\n Filters=[{'Name': \"vpc-id\", 'Values': [vpc_id]}])\n public_route_table_id = None\n private_route_table_id = None\n for route_table in resp['RouteTables']:\n for route in route_table['Routes']:\n if 'GatewayId' in route and route['GatewayId'] == igw_id:\n public_route_table_id = route_table['RouteTableId']\n LOGGER.info(\"%s found public route table %s\",\n tag_prefix, public_route_table_id)\n break\n if ('NatGatewayId' in route and\n route['NatGatewayId'] == nat_gateway_id):\n private_route_table_id = route_table['RouteTableId']\n LOGGER.info(\"%s found private route table %s\",\n tag_prefix, private_route_table_id)\n\n if not public_route_table_id:\n resp = ec2_client.create_route_table(\n DryRun=dry_run,\n VpcId=vpc_id)\n public_route_table_id = resp['RouteTable']['RouteTableId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[public_route_table_id],\n Tags=[\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\", 'Value': \"%s public\" % tag_prefix}])\n LOGGER.info(\"%s created public subnet route table %s\",\n tag_prefix, public_route_table_id)\n resp = ec2_client.create_route(\n DryRun=dry_run,\n DestinationCidrBlock='0.0.0.0/0',\n GatewayId=igw_id,\n RouteTableId=public_route_table_id)\n\n if not private_route_table_id:\n resp = ec2_client.create_route_table(\n DryRun=dry_run,\n VpcId=vpc_id)\n private_route_table_id = resp['RouteTable']['RouteTableId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[private_route_table_id],\n Tags=[\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\", 'Value': \"%s internal\" % tag_prefix}])\n private_route_table_id = resp['RouteTable']['RouteTableId']\n LOGGER.info(\"%s created private route table %s\",\n tag_prefix, private_route_table_id)\n for _ in range(0, NB_RETRIES):\n # The NAT Gateway takes some time to be fully operational.\n try:\n resp = ec2_client.create_route(\n DryRun=dry_run,\n DestinationCidrBlock='0.0.0.0/0',\n NatGatewayId=nat_gateway_id,\n RouteTableId=private_route_table_id)\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidNatGatewayID.NotFound':\n raise\n time.sleep(RETRY_WAIT_DELAY)\n\n resp = 
ec2_client.describe_route_tables(\n DryRun=dry_run,\n RouteTableIds=[public_route_table_id])\n assocs = resp['RouteTables'][0]['Associations']\n if len(assocs) > 1:\n LOGGER.warning(\"%s found more than one route table association for\"\\\n \" public route table. Using first one in the list.\", tag_prefix)\n if not assocs[0]['Main']:\n LOGGER.warning(\"%s public route table is not the main one for the VPC.\",\n tag_prefix)\n\n for cidr_block, subnet in web_subnet_by_cidrs.items():\n if not subnet:\n # Maybe there was a conflict and we skipped this cidr_block.\n continue\n subnet_id = subnet['SubnetId']\n resp = ec2_client.describe_route_tables(\n DryRun=dry_run,\n Filters=[{\n 'Name': 'association.subnet-id',\n 'Values': [subnet_id]\n }])\n # The Main route table does not show as an explicit association.\n found_association = not bool(resp['RouteTables'])\n if found_association:\n LOGGER.info(\n \"%s found public route table %s associated to web subnet %s\",\n tag_prefix, public_route_table_id, subnet_id)\n else:\n resp = ec2_client.associate_route_table(\n DryRun=dry_run,\n RouteTableId=public_route_table_id,\n SubnetId=subnet_id)\n LOGGER.info(\n \"%s associate public route table %s to web subnet %s\",\n tag_prefix, public_route_table_id, subnet_id)\n\n for cidr_block, subnet in dbs_subnet_by_cidrs.items():\n subnet_id = subnet['SubnetId']\n resp = ec2_client.describe_route_tables(\n DryRun=dry_run,\n Filters=[{\n 'Name': 'association.subnet-id',\n 'Values': [subnet_id]\n }])\n # The Main route table does not show as an explicit association.\n found_association = False\n if resp['RouteTables']:\n found_association = (\n resp['RouteTables'][0]['Associations'][0]['RouteTableId'] ==\n private_route_table_id\n )\n if found_association:\n LOGGER.info(\n \"%s found private route table %s associated to dbs subnet %s\",\n tag_prefix, private_route_table_id, subnet_id)\n else:\n resp = ec2_client.associate_route_table(\n DryRun=dry_run,\n RouteTableId=private_route_table_id,\n SubnetId=subnet_id)\n LOGGER.info(\n \"%s associate private route table %s to dbs subnet %s\",\n tag_prefix, private_route_table_id, subnet_id)\n\n for cidr_block, subnet in app_subnet_by_cidrs.items():\n subnet_id = subnet['SubnetId']\n resp = ec2_client.describe_route_tables(\n DryRun=dry_run,\n Filters=[{\n 'Name': 'association.subnet-id',\n 'Values': [subnet_id]\n }])\n # The Main route table does not show as an explicit association.\n found_association = False\n if resp['RouteTables']:\n found_association = (\n resp['RouteTables'][0]['Associations'][0]['RouteTableId'] ==\n private_route_table_id\n )\n if found_association:\n LOGGER.info(\n \"%s found private route table %s associated to app subnet %s\",\n tag_prefix, private_route_table_id, subnet_id)\n else:\n resp = ec2_client.associate_route_table(\n DryRun=dry_run,\n RouteTableId=private_route_table_id,\n SubnetId=subnet_id)\n LOGGER.info(\n \"%s associate private route table %s to app subnet %s\",\n tag_prefix, private_route_table_id, subnet_id)\n\n # Create the ELB, proxies and databases security groups\n # The app security group (as the instance role) will be specific\n # to the application.\n #pylint:disable=unbalanced-tuple-unpacking\n moat_name, vault_name, gate_name, kitchen_door_name = \\\n _get_security_group_names([\n 'moat', 'vault', 'castle-gate', 'kitchen-door'],\n tag_prefix=sg_tag_prefix)\n moat_sg_id, vault_sg_id, gate_sg_id, kitchen_door_sg_id = \\\n _get_security_group_ids(\n [moat_name, vault_name, gate_name, kitchen_door_name],\n tag_prefix, 
vpc_id=vpc_id, ec2_client=ec2_client)\n\n update_moat_rules = (not moat_sg_id)\n update_gate_rules = (not gate_sg_id)\n update_vault_rules = (not vault_sg_id)\n update_kitchen_door_rules = (not kitchen_door_sg_id)\n\n if not moat_sg_id:\n resp = ec2_client.create_security_group(\n Description='%s ELB' % tag_prefix,\n GroupName=moat_name,\n VpcId=vpc_id,\n DryRun=dry_run)\n moat_sg_id = resp['GroupId']\n LOGGER.info(\"%s created %s security group %s\",\n tag_prefix, moat_name, moat_sg_id)\n if not gate_sg_id:\n resp = ec2_client.create_security_group(\n Description='%s session managers' % tag_prefix,\n GroupName=gate_name,\n VpcId=vpc_id,\n DryRun=dry_run)\n gate_sg_id = resp['GroupId']\n LOGGER.info(\"%s created %s security group %s\",\n tag_prefix, gate_name, gate_sg_id)\n if not vault_sg_id:\n resp = ec2_client.create_security_group(\n Description='%s databases' % tag_prefix,\n GroupName=vault_name,\n VpcId=vpc_id,\n DryRun=dry_run)\n vault_sg_id = resp['GroupId']\n LOGGER.info(\"%s created %s security group %s\",\n tag_prefix, vault_name, vault_sg_id)\n # kitchen_door_sg_id: Kitchen door security group is created later on\n # if we have ssh keys.\n\n resp = ec2_client.describe_security_groups(\n DryRun=dry_run,\n GroupIds=[moat_sg_id, vault_sg_id, gate_sg_id])\n for security_group in resp['SecurityGroups']:\n if security_group['GroupId'] == moat_sg_id:\n # moat rules\n LOGGER.info(\"%s check ingress rules for %s\", tag_prefix, moat_name)\n check_security_group_ingress(security_group, expected_rules=[\n {'port': 80, 'source': '0.0.0.0/0'},\n {'port': 80, 'source': '::/0'},\n {'port': 443, 'source': '0.0.0.0/0'},\n {'port': 443, 'source': '::/0'},\n ],\n tag_prefix=tag_prefix)\n elif security_group['GroupId'] == gate_sg_id:\n # castle-gate rules\n LOGGER.info(\"%s check ingress rules for %s\", tag_prefix, gate_name)\n check_security_group_ingress(security_group, expected_rules=[\n {'port': 80, 'source': moat_sg_id},\n {'port': 443, 'source': moat_sg_id}\n ],\n tag_prefix=tag_prefix)\n elif security_group['GroupId'] == vault_sg_id:\n # vault rules\n LOGGER.info(\"%s check ingress rules for %s\", tag_prefix, vault_name)\n check_security_group_ingress(security_group, expected_rules=[\n {'port': 5432, 'source': gate_sg_id}\n ],\n tag_prefix=tag_prefix)\n\n # moat allow rules\n if update_moat_rules:\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=moat_sg_id,\n IpPermissions=[{\n 'FromPort': 80,\n 'IpProtocol': 'tcp',\n 'IpRanges': [{\n 'CidrIp': '0.0.0.0/0'\n }],\n 'ToPort': 80\n }, {\n 'FromPort': 80,\n 'IpProtocol': 'tcp',\n 'Ipv6Ranges': [{\n 'CidrIpv6': '::/0',\n }],\n 'ToPort': 80\n }, {\n 'FromPort': 443,\n 'IpProtocol': 'tcp',\n 'IpRanges': [{\n 'CidrIp': '0.0.0.0/0'\n }],\n 'ToPort': 443\n }, {\n 'FromPort': 443,\n 'IpProtocol': 'tcp',\n 'Ipv6Ranges': [{\n 'CidrIpv6': '::/0',\n }],\n 'ToPort': 443\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n\n if update_gate_rules:\n # castle-gate allow rules\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=gate_sg_id,\n IpPermissions=[{\n 'IpProtocol': 'tcp',\n 'FromPort': 80,\n 'ToPort': 80,\n 'UserIdGroupPairs': [{'GroupId': moat_sg_id}]\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = 
ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=gate_sg_id,\n IpPermissions=[{\n 'IpProtocol': 'tcp',\n 'FromPort': 443,\n 'ToPort': 443,\n 'UserIdGroupPairs': [{'GroupId': moat_sg_id}]\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = ec2_client.authorize_security_group_egress(\n DryRun=dry_run,\n GroupId=gate_sg_id,\n IpPermissions=[{\n 'IpProtocol': '-1',\n 'IpRanges': [{\n 'CidrIp': '0.0.0.0/0',\n }]}])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n # vault allow rules\n if update_vault_rules:\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=vault_sg_id,\n IpPermissions=[{\n 'IpProtocol': 'tcp',\n 'FromPort': 5432,\n 'ToPort': 5432,\n 'UserIdGroupPairs': [{'GroupId': gate_sg_id}]\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = ec2_client.authorize_security_group_egress(\n DryRun=dry_run,\n GroupId=vault_sg_id,\n IpPermissions=[{\n 'IpProtocol': '-1',\n 'IpRanges': [{\n 'CidrIp': '0.0.0.0/0',\n }]}])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n\n # Create uploads and logs S3 buckets\n # XXX create the identities bucket?\n # XXX need to force private.\n if not s3_identities_bucket:\n s3_identities_bucket = '%s-identities' % tag_prefix\n s3_uploads_bucket = tag_prefix\n s3_client = boto3.client('s3')\n if s3_logs_bucket:\n try:\n resp = s3_client.create_bucket(\n ACL='private',\n Bucket=s3_logs_bucket,\n CreateBucketConfiguration={\n 'LocationConstraint': region_name\n })\n LOGGER.info(\"%s created S3 bucket for logs %s\",\n tag_prefix, s3_logs_bucket)\n except botocore.exceptions.ClientError as err:\n LOGGER.info(\"%s found S3 bucket for logs %s\",\n tag_prefix, s3_logs_bucket)\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'BucketAlreadyOwnedByYou':\n raise\n # Apply bucket encryption by default\n found_encryption = False\n try:\n resp = s3_client.get_bucket_encryption(\n Bucket=s3_logs_bucket)\n if resp['ServerSideEncryptionConfiguration']['Rules'][0][\n 'ApplyServerSideEncryptionByDefault'][\n 'SSEAlgorithm'] == 'AES256':\n found_encryption = True\n LOGGER.info(\"%s found encryption AES256 enabled on %s bucket\",\n tag_prefix, s3_logs_bucket)\n except botocore.exceptions.ClientError as err:\n LOGGER.info(\"%s found S3 bucket for logs %s\",\n tag_prefix, s3_logs_bucket)\n if not err.response.get('Error', {}).get('Code', 'Unknown') == \\\n 'ServerSideEncryptionConfigurationNotFoundError':\n raise\n if not found_encryption:\n s3_client.put_bucket_encryption(\n Bucket=s3_logs_bucket,\n ServerSideEncryptionConfiguration={\n 'Rules': [{\n 'ApplyServerSideEncryptionByDefault': {\n 'SSEAlgorithm': 'AES256',\n }\n }]\n })\n LOGGER.info(\"%s enable encryption on %s bucket\",\n tag_prefix, s3_logs_bucket)\n\n # Set versioning and lifecycle policies\n resp = s3_client.get_bucket_versioning(\n Bucket=s3_logs_bucket)\n if 'Status' in resp and resp['Status'] == 'Enabled':\n LOGGER.info(\"%s found versioning enabled on %s bucket\",\n tag_prefix, s3_logs_bucket)\n else:\n s3_client.put_bucket_versioning(\n Bucket=s3_logs_bucket,\n 
VersioningConfiguration={\n 'MFADelete': 'Disabled',\n 'Status': 'Enabled'\n })\n LOGGER.info(\"%s enable versioning on %s bucket\",\n tag_prefix, s3_logs_bucket)\n found_policy = False\n #pylint:disable=too-many-nested-blocks\n try:\n resp = s3_client.get_bucket_lifecycle_configuration(\n Bucket=s3_logs_bucket)\n for rule in resp['Rules']:\n if rule['Status'] == 'Enabled':\n found_rule = True\n for transition in rule['Transitions']:\n if transition['StorageClass'] == 'GLACIER':\n if transition.get('Days', 0) < 90:\n found_rule = False\n LOGGER.warning(\"%s lifecycle for 'GLACIER'\"\\\n \" is less than 90 days.\", tag_prefix)\n break\n if rule['Expiration'].get('Days', 0) < 365:\n found_rule = False\n LOGGER.warning(\n \"%s lifecycle expiration is less than 365 days.\",\n tag_prefix)\n for transition in rule['NoncurrentVersionTransitions']:\n if transition['StorageClass'] == 'GLACIER':\n if transition.get('NoncurrentDays', 0) < 90:\n found_rule = False\n LOGGER.warning(\n \"%s version lifecycle for 'GLACIER'\"\\\n \" is less than 90 days.\", tag_prefix)\n break\n if rule['NoncurrentVersionExpiration'].get(\n 'NoncurrentDays', 0) < 365:\n found_rule = False\n LOGGER.warning(\"%s lifecycle version expiration is\"\\\n \" less than 365 days.\", tag_prefix)\n if found_rule:\n found_policy = True\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'NoSuchLifecycleConfiguration':\n raise\n if found_policy:\n LOGGER.info(\"%s found lifecycle policy on %s bucket\",\n tag_prefix, s3_logs_bucket)\n else:\n s3_client.put_bucket_lifecycle_configuration(\n Bucket=s3_logs_bucket,\n LifecycleConfiguration={\n \"Rules\": [{\n \"Status\": \"Enabled\",\n \"ID\": \"expire-logs\",\n \"Filter\": {\n \"Prefix\": \"\", # This is required.\n },\n \"Transitions\": [{\n \"Days\": 90,\n \"StorageClass\": \"GLACIER\"\n }],\n \"Expiration\" : {\n \"Days\": 365\n },\n \"NoncurrentVersionTransitions\": [{\n \"NoncurrentDays\": 90,\n \"StorageClass\": \"GLACIER\"\n }],\n 'NoncurrentVersionExpiration': {\n 'NoncurrentDays': 365\n },\n }]})\n LOGGER.info(\"%s update lifecycle policy on %s bucket\",\n tag_prefix, s3_logs_bucket)\n\n # https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-access-logs.html#attach-bucket-policy\n elb_account_ids_per_region = {\n 'us-east-1': '127311923021',\n 'us-east-2': '033677994240',\n 'us-west-1': '027434742980',\n 'us-west-2': '797873946194',\n 'af-south-1': '098369216593',\n 'ca-central-1': '985666609251',\n 'eu-central-1': '054676820928',\n 'eu-west-1': '156460612806',\n 'eu-west-2': '652711504416',\n 'eu-south-1': '635631232127',\n 'eu-west-3': '009996457667',\n 'eu-north-1': '897822967062',\n 'ap-east-1': '754344448648',\n 'ap-northeast-1': '582318560864',\n 'ap-northeast-2': '600734575887',\n 'ap-northeast-3': '383597477331',\n 'ap-southeast-1': '114774131450',\n 'ap-southeast-2': '783225319266',\n 'ap-south-1': '718504428378',\n 'me-south-1': '076674570225',\n 'sa-east-1': '507241528517'\n }\n elb_account_id = elb_account_ids_per_region[region_name]\n s3_client.put_bucket_policy(\n Bucket=s3_logs_bucket,\n Policy=json.dumps({\n \"Version\": \"2008-10-17\",\n \"Id\": \"WriteLogs\",\n \"Statement\": [{\n # billing reports\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"billingreports.amazonaws.com\"\n },\n \"Action\": [\n \"s3:GetBucketAcl\",\n \"s3:GetBucketPolicy\"\n ],\n \"Resource\": \"arn:aws:s3:::%s\" % s3_logs_bucket\n }, {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": 
\"billingreports.amazonaws.com\"\n },\n \"Action\": \"s3:PutObject\",\n \"Resource\": \"arn:aws:s3:::%s/*\" % s3_logs_bucket\n }, {\n # ELB access logs\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"AWS\": \"arn:aws:iam::%s:root\" % elb_account_id\n },\n \"Action\": \"s3:PutObject\",\n \"Resource\":\n \"arn:aws:s3:::%s/var/log/elb/*\" % s3_logs_bucket\n }, {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"delivery.logs.amazonaws.com\"\n },\n \"Action\": \"s3:PutObject\",\n \"Resource\":\n (\"arn:aws:s3:::%s/var/log/elb/*\" % s3_logs_bucket),\n \"Condition\": {\n \"StringEquals\": {\n \"s3:x-amz-acl\": \"bucket-owner-full-control\"\n }\n }\n }, {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"delivery.logs.amazonaws.com\"\n },\n \"Action\": \"s3:GetBucketAcl\",\n \"Resource\": \"arn:aws:s3:::%s\" % s3_logs_bucket\n }]\n }))\n\n if s3_uploads_bucket:\n try:\n resp = s3_client.create_bucket(\n ACL='private',\n Bucket=s3_uploads_bucket,\n CreateBucketConfiguration={\n 'LocationConstraint': region_name\n })\n LOGGER.info(\"%s created S3 bucket for uploads %s\",\n tag_prefix, s3_uploads_bucket)\n except botocore.exceptions.ClientError as err:\n LOGGER.info(\"%s found S3 bucket for uploads %s\",\n tag_prefix, s3_uploads_bucket)\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'BucketAlreadyOwnedByYou':\n raise\n\n # Create instance profiles ...\n iam_client = boto3.client('iam')\n # ... for webfront instances\n create_instance_profile(\n create_gate_role(gate_name,\n s3_logs_bucket=s3_logs_bucket, s3_uploads_bucket=s3_uploads_bucket,\n iam_client=iam_client, tag_prefix=tag_prefix),\n iam_client=iam_client, region_name=region_name,\n tag_prefix=tag_prefix, dry_run=dry_run)\n # ... for databases instances\n create_instance_profile(\n create_vault_role(vault_name,\n s3_logs_bucket=s3_logs_bucket, s3_uploads_bucket=s3_uploads_bucket,\n iam_client=iam_client, tag_prefix=tag_prefix),\n iam_client=iam_client, region_name=region_name,\n tag_prefix=tag_prefix, dry_run=dry_run)\n\n if ssh_key_name:\n if not ssh_key_content:\n ssh_key_path = os.path.join(os.getenv('HOME'),\n '.ssh', '%s.pub' % ssh_key_name)\n if os.path.exists(ssh_key_path):\n with open(ssh_key_path, 'rb') as ssh_key_obj:\n ssh_key_content = ssh_key_obj.read()\n else:\n LOGGER.warning(\"%s no content for SSH key %s\",\n tag_prefix, ssh_key_name)\n # import SSH keys\n try:\n resp = ec2_client.import_key_pair(\n DryRun=dry_run,\n KeyName=ssh_key_name,\n PublicKeyMaterial=ssh_key_content)\n LOGGER.info(\"%s imported SSH key %s\", tag_prefix, ssh_key_name)\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidKeyPair.Duplicate':\n raise\n LOGGER.info(\"%s found SSH key %s\", tag_prefix, ssh_key_name)\n\n # ... 
for sally instances\n create_instance_profile(\n create_logs_role(kitchen_door_name,\n s3_logs_bucket=s3_logs_bucket,\n iam_client=iam_client, tag_prefix=tag_prefix),\n iam_client=iam_client, region_name=region_name,\n tag_prefix=tag_prefix, dry_run=dry_run)\n\n # allows SSH connection to instances for debugging\n update_kitchen_door_rules = (not kitchen_door_sg_id)\n if not kitchen_door_sg_id:\n resp = ec2_client.create_security_group(\n Description='%s SSH access' % tag_prefix,\n GroupName=kitchen_door_name,\n VpcId=vpc_id,\n DryRun=dry_run)\n kitchen_door_sg_id = resp['GroupId']\n LOGGER.info(\"%s created %s security group %s\",\n tag_prefix, kitchen_door_name, kitchen_door_sg_id)\n\n if update_kitchen_door_rules:\n try:\n if sally_ip:\n cidr_block = '%s/32' % sally_ip\n else:\n LOGGER.warning(\"no IP range was specified to restrict\"\\\n \" access to SSH port\")\n cidr_block = '0.0.0.0/0'\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=kitchen_door_sg_id,\n CidrIp=cidr_block,\n IpProtocol='tcp',\n FromPort=22,\n ToPort=22)\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = ec2_client.authorize_security_group_egress(\n DryRun=dry_run,\n GroupId=kitchen_door_sg_id,\n IpPermissions=[{\n 'IpProtocol': '-1',\n 'IpRanges': [{\n 'CidrIp': '0.0.0.0/0',\n }]}])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=gate_sg_id,\n IpPermissions=[{\n 'IpProtocol': 'tcp',\n 'FromPort': 22,\n 'ToPort': 22,\n 'UserIdGroupPairs': [{'GroupId': kitchen_door_sg_id}]\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=vault_sg_id,\n IpPermissions=[{\n 'IpProtocol': 'tcp',\n 'FromPort': 22,\n 'ToPort': 22,\n 'UserIdGroupPairs': [{'GroupId': kitchen_door_sg_id}]\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n\n # Creates encryption keys (KMS) in region\n if not storage_enckey:\n storage_enckey = _get_or_create_storage_enckey(\n region_name, tag_prefix, dry_run=dry_run)\n\n # Create an Application ELB and WAF\n load_balancer_arn = create_elb(\n tag_prefix, web_subnet_by_cidrs, moat_sg_id,\n s3_logs_bucket=s3_logs_bucket,\n tls_priv_key=tls_priv_key, tls_fullchain_cert=tls_fullchain_cert,\n region_name=region_name)\n create_waf(\n tag_prefix,\n elb_arn=load_balancer_arn,\n s3_logs_bucket=s3_logs_bucket,\n region_name=region_name,\n dry_run=dry_run)", "def fusion_api_delete_ethernet_network(self, name=None, uri=None, param='', api=None, headers=None):\n return self.ethernet_network.delete(name=name, uri=uri, param=param, api=api, headers=headers)", "def test_replace_cluster_network(self):\n pass", "def test_routing_ip_prefix_uninstall(self):\n self._common_uninstall_external_and_unintialized(\n 'some_id', routing_ip_prefix.delete,\n {'prefix': {}}\n )", "def network_create_end(self, payload):\n network_id = payload['network']['id']\n self.enable_dhcp_helper(network_id)", "def test_ipam_ip_addresses_delete(self):\n pass", "def mac_pool_remove(handle, name, 
parent_dn=\"org-root\"):\r\n dn = parent_dn + '/mac-pool-' + name\r\n mo = handle.query_dn(dn)\r\n if mo:\r\n handle.remove_mo(mo)\r\n handle.commit()\r\n else:\r\n raise ValueError(\"MAC Pool is not available\")", "def release_floating_ip(self, context, address,\n affect_auto_assigned=False):\n floating_ip = self.db.floating_ip_get_by_address(context, address)\n if floating_ip['fixed_ip']:\n raise exception.ApiError(_('Floating ip is in use. '\n 'Disassociate it before releasing.'))\n if not affect_auto_assigned and floating_ip.get('auto_assigned'):\n return\n # NOTE(vish): We don't know which network host should get the ip\n # when we deallocate, so just send it to any one. This\n # will probably need to move into a network supervisor\n # at some point.\n rpc.cast(context,\n FLAGS.network_topic,\n {'method': 'deallocate_floating_ip',\n 'args': {'floating_address': floating_ip['address']}})", "def test_networking_project_network_delete(self):\n pass", "def release(self, floating_ip_id):\n self.client.delete_floatingip(floating_ip_id)", "def test_delete_cluster_network(self):\n pass", "def network_delete_end(self, payload):\n self.disable_dhcp_helper(payload['network_id'])", "def delete_endpoint(EndpointName=None):\n pass", "def post_floating_ip_delete(self, resource_id, resource_dict):\n pass", "def testPutNetworkLocalIp(self):\n models.System.objects.all().delete()\n self._saveSystem()\n old_count = models.Network.objects.count()\n self._put('inventory/networks/1/',\n data=testsxml.network_put_xml_opt_ip_addr % \"169.254.4.4\",\n username=\"admin\", password=\"password\")\n self.assertEquals(old_count, models.Network.objects.count())\n\n self._put('inventory/networks/1/',\n data=testsxml.network_put_xml_opt_ip_addr % \"4.4.4.4\",\n username=\"admin\", password=\"password\")\n self.assertEquals(old_count + 1, models.Network.objects.count())", "def delete_network(self, network):\r\n return self.delete(self.network_path % (network))", "def post_subnet_create(self, resource_dict):\n pass", "def create_public_ip(self):\n raise NotImplementedError", "def destroy(self, network, device_name):\n if self.conf.use_namespaces:\n namespace = NS_PREFIX + network.id\n else:\n namespace = None\n\n self.driver.unplug(device_name, namespace=namespace)\n\n self.plugin.release_dhcp_port(network.id,\n self.get_device_id(network))", "def post_network_ipam_update(self, resource_id, resource_dict):\n pass", "def run(self, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.delete_network(network[\"id\"])", "def subnet_create(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.create_subnet(**kwargs)", "def network_update_end(self, payload):\n network_id = payload['network']['id']\n if payload['network']['admin_state_up']:\n self.enable_dhcp_helper(network_id)\n else:\n self.disable_dhcp_helper(network_id)", "def _disassociate_floating_ip(self, context, address, interface,\n instance_uuid):\n interface = CONF.public_interface or interface\n\n @utils.synchronized(six.text_type(address))\n def do_disassociate():\n # NOTE(vish): Note that we are disassociating in the db before we\n # actually remove the ip address on the host. We are\n # safe from races on this host due to the decorator,\n # but another host might grab the ip right away. 
We\n # don't worry about this case because the minuscule\n # window where the ip is on both hosts shouldn't cause\n # any problems.\n floating = objects.FloatingIP.disassociate(context, address)\n fixed = floating.fixed_ip\n if not fixed:\n # NOTE(vish): ip was already disassociated\n return\n if interface:\n # go go driver time\n self.l3driver.remove_floating_ip(address, fixed.address,\n interface, fixed.network)\n payload = dict(project_id=context.project_id,\n instance_id=instance_uuid,\n floating_ip=address)\n self.notifier.info(context,\n 'network.floating_ip.disassociate', payload)\n do_disassociate()", "def test_networking_project_network_tag_delete(self):\n pass", "def test_replace_namespaced_egress_network_policy(self):\n pass", "def delete_network(options, vsm_obj):\n print(\"Disconnecting edge interface attached to this network\")\n edge_id = get_edge(vsm_obj)\n edge = Edge(vsm_obj, '4.0')\n edge.id = edge_id\n vnics = Vnics(edge)\n vnics_schema = vnics.query()\n network = get_network_id(options, get_network_name_on_vc(options))\n for vnic in vnics_schema.vnics:\n if network and vnic.portgroupId == network:\n print(\"Found a matching vnic %s %s\" % (options.name, vnic.index))\n vnic.isConnected = \"False\"\n vnic.portgroupId = None\n vnic.name = \"vnic%s\" % vnic.index\n vnics_schema = VnicsSchema()\n vnics_schema.vnics = [vnic]\n result = vnics.create(vnics_schema)\n if (result[0].response.status != 204):\n print \"update vnic error: %s %s\" \\\n % (result[0].response.status, result[0].response.reason)\n return False\n else:\n break\n else:\n print (\"No matching vnic found\")\n\n vdn_scope = get_transport_zone(options)\n virtual_wire = VirtualWire(vdn_scope)\n vwire = virtual_wire.read_by_name(get_network_name(options))\n name = get_network_name(options)\n if vwire != \"FAILURE\":\n print(\"Found a matching network %s\" % (options.name))\n virtual_wire.id = vwire.objectId\n result = virtual_wire.delete()\n if (result.response.status != 200):\n print (\"Delete vwire error: %s\" % result.response.reason)\n return False\n else:\n print (\"No matching network found\")\n print(\"Network %s deleted\" % (options.name))\n\n return True", "def rm_network(c):\n print('Stopping local test network and removing containers')\n with c.cd('images'):\n c.run('sudo docker-compose down -v', hide='stderr')\n\n c.run('sudo rm -rf volumes/stellar-core/opt/stellar-core/buckets')\n c.run('sudo rm -f volumes/stellar-core/opt/stellar-core/*.log')\n c.run('sudo rm -rf volumes/stellar-core/tmp')", "def unplug_port_from_network(self, device_id, device_owner, hostname,\n port_id, network_id, tenant_id, sg, vnic_type,\n switch_bindings=None, segments=None):", "def launch(\n *,\n key_name: Optional[str],\n instance_type: str,\n ami: str,\n ami_user: str,\n tags: Dict[str, str],\n display_name: Optional[str] = None,\n size_gb: int,\n security_group_name: str,\n instance_profile: Optional[str],\n nonce: str,\n delete_after: datetime.datetime,\n) -> Instance:\n\n if display_name:\n tags[\"Name\"] = display_name\n tags[\"scratch-delete-after\"] = str(delete_after.timestamp())\n tags[\"nonce\"] = nonce\n tags[\"git_ref\"] = git.describe()\n tags[\"ami-user\"] = ami_user\n\n ec2 = boto3.client(\"ec2\")\n groups = ec2.describe_security_groups()\n security_group_id = None\n for group in groups[\"SecurityGroups\"]:\n if group[\"GroupName\"] == security_group_name:\n security_group_id = group[\"GroupId\"]\n break\n\n if security_group_id is None:\n vpcs = ec2.describe_vpcs()\n vpc_id = None\n for vpc in 
vpcs[\"Vpcs\"]:\n if vpc[\"IsDefault\"] == True:\n vpc_id = vpc[\"VpcId\"]\n break\n if vpc_id is None:\n default_vpc = ec2.create_default_vpc()\n vpc_id = default_vpc[\"Vpc\"][\"VpcId\"]\n securitygroup = ec2.create_security_group(\n GroupName=security_group_name,\n Description=\"Allows all.\",\n VpcId=vpc_id,\n )\n security_group_id = securitygroup[\"GroupId\"]\n ec2.authorize_security_group_ingress(\n GroupId=security_group_id,\n CidrIp=\"0.0.0.0/0\",\n IpProtocol=\"tcp\",\n FromPort=22,\n ToPort=22,\n )\n\n network_interface: InstanceNetworkInterfaceSpecificationTypeDef = {\n \"AssociatePublicIpAddress\": True,\n \"DeviceIndex\": 0,\n \"Groups\": [security_group_id],\n }\n\n say(f\"launching instance {display_name or '(unnamed)'}\")\n with open(ROOT / \"misc\" / \"scratch\" / \"provision.bash\") as f:\n provisioning_script = f.read()\n kwargs: RunInstancesRequestRequestTypeDef = {\n \"MinCount\": 1,\n \"MaxCount\": 1,\n \"ImageId\": ami,\n \"InstanceType\": cast(InstanceTypeType, instance_type),\n \"UserData\": provisioning_script,\n \"TagSpecifications\": [\n {\n \"ResourceType\": \"instance\",\n \"Tags\": [{\"Key\": k, \"Value\": v} for (k, v) in tags.items()],\n }\n ],\n \"NetworkInterfaces\": [network_interface],\n \"BlockDeviceMappings\": [\n {\n \"DeviceName\": \"/dev/sda1\",\n \"Ebs\": {\n \"VolumeSize\": size_gb,\n \"VolumeType\": \"gp3\",\n },\n }\n ],\n \"MetadataOptions\": {\n # Allow Docker containers to access IMDSv2.\n \"HttpPutResponseHopLimit\": 2,\n },\n }\n if key_name:\n kwargs[\"KeyName\"] = key_name\n if instance_profile:\n kwargs[\"IamInstanceProfile\"] = {\"Name\": instance_profile}\n i = boto3.resource(\"ec2\").create_instances(**kwargs)[0]\n\n return i", "def setNetGroup(addr): #status: Done, not tested\r\n pass", "def test_delete_collection_namespaced_egress_network_policy(self):\n pass", "def net_undefine(network, server, virt=\"Xen\"):\n\n cmd = \"virsh -c %s net-undefine %s 2>/dev/null\" % (virt2uri(virt), network)\n ret, out = run_remote(server, cmd)\n\n return ret", "def test_create_network_and_subnet(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 254\n self.__create_network_and_subnet_test_helper__(network_name, network_cidr)", "def network(self):\n address = unicode(\"%s/%s\" % (self.address, _get_cidr(self.netmask)))\n return IPv4Network(address, strict=False)", "def post_virtual_network_update(self, resource_id, resource_dict):\n pass", "def test_delete_namespaced_egress_network_policy(self):\n pass", "def test_delete_network(self):\n pass", "def unlink(address):", "def delete_network_profile(arn=None):\n pass", "def proxy(self):\n result = self.instances(role='stateless-body', format=\"PrivateIpAddress\")\n return result[0][0] if result else None", "def post_subnet_update(self, resource_id, resource_dict):\n pass", "def test_networking_project_network_update(self):\n pass", "def post_virtual_network_create(self, resource_dict):\n pass" ]
[ "0.60287136", "0.59219706", "0.555398", "0.5511613", "0.54330933", "0.5428225", "0.5323572", "0.5283788", "0.5276548", "0.51867646", "0.51824623", "0.5156002", "0.5132923", "0.5113544", "0.5092657", "0.5072642", "0.50216955", "0.4976981", "0.49680826", "0.49609733", "0.49414968", "0.48767704", "0.4873276", "0.48498288", "0.4838882", "0.4829115", "0.48182043", "0.48156497", "0.47867218", "0.47809005", "0.4763634", "0.4754242", "0.47502664", "0.4747155", "0.4741395", "0.4738247", "0.47308758", "0.4712483", "0.46967345", "0.46963295", "0.4692328", "0.4689062", "0.46879452", "0.46867636", "0.4682933", "0.46708584", "0.46575424", "0.46574804", "0.46465337", "0.4628688", "0.46239796", "0.46184325", "0.46181652", "0.4613814", "0.46112078", "0.4608405", "0.46034762", "0.45987776", "0.45892695", "0.4587805", "0.45877483", "0.45832485", "0.4583196", "0.45819354", "0.4578111", "0.45774752", "0.45683455", "0.45636573", "0.4557039", "0.4550361", "0.45477542", "0.4546314", "0.45457926", "0.45451862", "0.45448107", "0.45432955", "0.4535168", "0.45245528", "0.45177135", "0.45168558", "0.45155647", "0.45144904", "0.4512743", "0.45123014", "0.4510839", "0.45091224", "0.45046446", "0.4495404", "0.44917664", "0.44906375", "0.44878718", "0.44833416", "0.44790924", "0.44790268", "0.4474562", "0.44735518", "0.44711378", "0.44674352", "0.44672686", "0.44659948", "0.4465427" ]
0.0
-1
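The document field of the row ending here provisions AWS networking with boto3 and repeatedly wraps authorize_security_group_ingress in a try/except that treats InvalidPermission.Duplicate as "rule already present", so the provisioning script can be re-run safely. The sketch below isolates that idempotent pattern; the group id, port, CIDR, and region are illustrative placeholders, not values taken from the row.

import boto3
import botocore.exceptions

def ensure_ingress_rule(group_id, port, cidr='0.0.0.0/0', region_name='us-east-1'):
    # Authorize a single TCP ingress rule; ignore the error raised when the
    # exact same rule already exists so repeated runs are a no-op.
    ec2_client = boto3.client('ec2', region_name=region_name)
    try:
        ec2_client.authorize_security_group_ingress(
            GroupId=group_id,
            IpPermissions=[{
                'IpProtocol': 'tcp',
                'FromPort': port,
                'ToPort': port,
                'IpRanges': [{'CidrIp': cidr}],
            }])
    except botocore.exceptions.ClientError as err:
        if err.response.get('Error', {}).get(
                'Code', 'Unknown') != 'InvalidPermission.Duplicate':
            raise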
This operation is applicable only to replica set instances; it is not supported for standalone instances or sharded cluster instances. > If you have applied for a public endpoint for the instance, you must first call the [ReleasePublicNetworkAddress](~~67604~~) operation to release the public endpoint.
async def migrate_to_other_zone_with_options_async( self, request: dds_20151201_models.MigrateToOtherZoneRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.MigrateToOtherZoneResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.effective_time): query['EffectiveTime'] = request.effective_time if not UtilClient.is_unset(request.instance_id): query['InstanceId'] = request.instance_id if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.v_switch_id): query['VSwitchId'] = request.v_switch_id if not UtilClient.is_unset(request.zone_id): query['ZoneId'] = request.zone_id req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='MigrateToOtherZone', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.MigrateToOtherZoneResponse(), await self.call_api_async(params, req, runtime) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def release_address(self, public_ip=None, allocation_id=None):\r\n params = {}\r\n\r\n if public_ip is not None:\r\n params['PublicIp'] = public_ip\r\n elif allocation_id is not None:\r\n params['AllocationId'] = allocation_id\r\n\r\n return self.get_status('ReleaseAddress', params, verb='POST')", "def release_eip_address(\n public_ip=None, allocation_id=None, region=None, key=None, keyid=None, profile=None\n):\n if not salt.utils.data.exactly_one((public_ip, allocation_id)):\n raise SaltInvocationError(\n \"Exactly one of 'public_ip' OR 'allocation_id' must be provided\"\n )\n\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n try:\n return conn.release_address(public_ip, allocation_id)\n except boto.exception.BotoServerError as e:\n log.error(e)\n return False", "def test_deploy_instance_with_new_network(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 252\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr)", "def disassociate_elastic_ip(ElasticIp=None):\n pass", "def release_node_private_network_address_with_options(\n self,\n request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.network_type):\n query['NetworkType'] = request.network_type\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ReleaseNodePrivateNetworkAddress',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse(),\n self.call_api(params, req, runtime)\n )", "def test_replace_host_subnet(self):\n pass", "def post_instance_ip_delete(self, resource_id, resource_dict):\n pass", "def remove_fixed_ip_from_instance(self, context, instance_id, address):\n args = {'instance_id': instance_id,\n 'address': address}\n rpc.cast(context, FLAGS.network_topic,\n {'method': 'remove_fixed_ip_from_instance',\n 'args': args})", "def release_node_private_network_address(\n self,\n request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,\n ) -> dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:\n runtime = util_models.RuntimeOptions()\n return self.release_node_private_network_address_with_options(request, runtime)", "async def release_node_private_network_address_with_options_async(\n 
self,\n request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.network_type):\n query['NetworkType'] = request.network_type\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ReleaseNodePrivateNetworkAddress',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def test_patch_host_subnet(self):\n pass", "def test_deploy_instance_with_networks_and_e2e_connection_using_public_ip(self):\n\n # skip test if suite couldn't start from an empty, clean list of allocated IPs (to avoid cascading failures)\n if self.suite_world['allocated_ips']:\n self.skipTest(\"There were pre-existing, not deallocated IPs\")\n\n # Allocate an IP\n allocated_ip = self.__allocate_ip_test_helper__()\n\n # Create Keypair\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n keypair_name = TEST_KEYPAIR_PREFIX + \"_\" + suffix\n private_keypair_value = self.__create_keypair_test_helper__(keypair_name)\n\n # Create Router with an external network gateway\n router_name = TEST_ROUTER_PREFIX + \"_e2e_\" + suffix\n external_network_id = self.__get_external_network_test_helper__()\n router_id = self.__create_router_test_helper__(router_name, external_network_id)\n\n # Create Network\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 246\n network_id, subnet_id = self.__create_network_and_subnet_test_helper__(network_name, network_cidr)\n\n # Add interface to router\n port_id = self.neutron_operations.add_interface_router(router_id, subnet_id)\n self.test_world['ports'].append(port_id)\n\n # Deploy VM (it will have only one IP from the Public Pool)\n instance_name = TEST_SERVER_PREFIX + \"_e2e_\" + suffix\n keypair_name = TEST_KEYPAIR_PREFIX + \"_\" + suffix\n sec_group_name = TEST_SEC_GROUP_PREFIX + \"_\" + suffix\n server_id = self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name, is_network_new=False,\n keypair_name=keypair_name, is_keypair_new=False,\n sec_group_name=sec_group_name)\n\n # Associate the public IP to Server\n self.nova_operations.add_floating_ip_to_instance(server_id=server_id, ip_address=allocated_ip)\n\n # SSH Connection\n self.__ssh_connection_test_helper__(host=allocated_ip, private_key=private_keypair_value)", "def test_delete_host_subnet(self):\n pass", "def 
test_deploy_instance_with_network_and_associate_public_ip(self):\n\n # skip test if suite couldn't start from an empty, clean list of allocated IPs (to avoid cascading failures)\n if self.suite_world['allocated_ips']:\n self.skipTest(\"There were pre-existing, not deallocated IPs\")\n\n # Allocate IP\n allocated_ip = self.__allocate_ip_test_helper__()\n\n # Create Router with an external network gateway\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n router_name = TEST_ROUTER_PREFIX + \"_public_ip_\" + suffix\n external_network_id = self.__get_external_network_test_helper__()\n router_id = self.__create_router_test_helper__(router_name, external_network_id)\n\n # Create Network\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 247\n network_id, subnet_id = self.__create_network_and_subnet_test_helper__(network_name, network_cidr)\n\n # Add interface to router\n port_id = self.neutron_operations.add_interface_router(router_id, subnet_id)\n self.test_world['ports'].append(port_id)\n\n # Deploy VM (it will have only one IP from the Public Pool)\n instance_name = TEST_SERVER_PREFIX + \"_public_ip_\" + suffix\n server_id = self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name, is_network_new=False)\n\n # Associate Public IP to Server\n self.nova_operations.add_floating_ip_to_instance(server_id=server_id, ip_address=allocated_ip)", "def test_delete_collection_host_subnet(self):\n pass", "def release_port_fixed_ip(self, network_id, device_id, subnet_id):\n return self.call(self.context,\n self.make_msg('release_port_fixed_ip',\n network_id=network_id,\n subnet_id=subnet_id,\n device_id=device_id,\n host=self.host),\n topic=self.topic)", "def test_deploy_instance_with_new_network_and_keypair(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_keypair_\" + suffix\n keypair_name = TEST_KEYPAIR_PREFIX + \"_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 250\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr,\n keypair_name=keypair_name)", "def deregister_elastic_ip(ElasticIp=None):\n pass", "def post_network_ipam_delete(self, resource_id, resource_dict):\n pass", "def subnet_update(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.update_subnet(**kwargs)", "def disassociate(self, endpoint_name=None, instance_id=None):\n if instance_id is None:\n raise Exception(\"Instance required!\")\n if endpoint_name is None:\n self.request('/v1.1/endpoint/instances/%s' % instance_id, 'DELETE')\n else:\n self.request('/v1.1/endpoints/%s/instances/%s' %\n (endpoint_name, instance_id), 'DELETE')", "def remove_gateway(self, network_ref):\n raise NotImplementedError()", "def post_instance_ip_create(self, resource_dict):\n pass", "def remove_ipv4_address(self, net_interface, address):\n self._runner.run('ip addr del %s dev %s' % (address, net_interface))", "def disassociate_resolver_endpoint_ip_address(ResolverEndpointId=None, IpAddress=None):\n pass", "def _post_task_update_advertise_address():\n default_network_interface = None\n\n with open(KUBE_APISERVER_CONFIG) as f:\n lines = f.read()\n m = re.search(REGEXPR_ADVERTISE_ADDRESS, lines)\n if m:\n default_network_interface = m.group(1)\n LOG.debug(' default_network_interface = %s', default_network_interface)\n\n if advertise_address and 
default_network_interface \\\n and advertise_address != default_network_interface:\n cmd = [\"sed\", \"-i\", \"/oidc-issuer-url/! s/{}/{}/g\".format(default_network_interface, advertise_address),\n KUBE_APISERVER_CONFIG]\n _ = _exec_cmd(cmd)", "def test_create_host_subnet(self):\n pass", "def change_address(\n vm_hostname, new_address,\n offline=False, migrate=False, allow_reserved_hv=False,\n offline_transport='drbd',\n):\n\n if not offline:\n raise IGVMError('IP address change can be only performed offline')\n\n with _get_vm(vm_hostname) as vm:\n if vm.dataset_obj['datacenter_type'] != 'kvm.dct':\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n new_address = ip_address(new_address)\n\n if vm.dataset_obj['intern_ip'] == new_address:\n raise ConfigError('New IP address is the same as the old one!')\n\n if not vm.hypervisor.get_vlan_network(new_address) and not migrate:\n err = 'Current hypervisor does not support new subnet!'\n raise ConfigError(err)\n\n new_network = Query(\n {\n 'servertype': 'route_network',\n 'state': 'online',\n 'network_type': 'internal',\n 'intern_ip': Contains(new_address),\n }\n ).get()['hostname']\n\n vm_was_running = vm.is_running()\n\n with Transaction() as transaction:\n if vm_was_running:\n vm.shutdown(\n transaction=transaction,\n check_vm_up_on_transaction=False,\n )\n vm.change_address(\n new_address, new_network, transaction=transaction,\n )\n\n if migrate:\n vm_migrate(\n vm_object=vm,\n run_puppet=True,\n offline=True,\n no_shutdown=True,\n allow_reserved_hv=allow_reserved_hv,\n offline_transport=offline_transport,\n )\n else:\n vm.hypervisor.mount_vm_storage(vm, transaction=transaction)\n vm.run_puppet()\n vm.hypervisor.redefine_vm(vm)\n vm.hypervisor.umount_vm_storage(vm)\n\n if vm_was_running:\n vm.start()", "def network_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_network(**kwargs)", "def test_07_associate_public_ip(self):\n # Validate the following\n # 1. Create a project\n # 2. Add some public Ips to the project\n # 3. 
Verify public IP assigned can only used to create PF/LB rules\n # inside project\n\n networks = Network.list(\n self.apiclient,\n projectid=self.project.id,\n listall=True\n )\n self.assertEqual(\n isinstance(networks, list),\n True,\n \"Check list networks response returns a valid response\"\n )\n self.assertNotEqual(\n len(networks),\n 0,\n \"Check list networks response returns a valid network\"\n )\n network = networks[0]\n self.debug(\"Associating public IP for project: %s\" % self.project.id)\n public_ip = PublicIPAddress.create(\n self.apiclient,\n zoneid=self.virtual_machine.zoneid,\n services=self.services[\"server\"],\n networkid=network.id,\n projectid=self.project.id\n )\n self.cleanup.append(public_ip)\n\n #Create NAT rule\n self.debug(\n \"Creating a NAT rule within project, VM ID: %s\" %\n self.virtual_machine.id)\n nat_rule = NATRule.create(\n self.apiclient,\n self.virtual_machine,\n self.services[\"natrule\"],\n public_ip.ipaddress.id,\n projectid=self.project.id\n )\n self.debug(\"created a NAT rule with ID: %s\" % nat_rule.id)\n nat_rule_response = NATRule.list(\n self.apiclient,\n id=nat_rule.id\n )\n self.assertEqual(\n isinstance(nat_rule_response, list),\n True,\n \"Check list response returns a valid list\"\n )\n self.assertNotEqual(\n len(nat_rule_response),\n 0,\n \"Check Port Forwarding Rule is created\"\n )\n self.assertEqual(\n nat_rule_response[0].id,\n nat_rule.id,\n \"Check Correct Port forwarding Rule is returned\"\n )\n\n #Create Load Balancer rule and assign VMs to rule\n self.debug(\"Created LB rule for public IP: %s\" %\n public_ip.ipaddress)\n lb_rule = LoadBalancerRule.create(\n self.apiclient,\n self.services[\"lbrule\"],\n public_ip.ipaddress.id,\n projectid=self.project.id\n )\n self.debug(\"Assigning VM: %s to LB rule: %s\" % (\n self.virtual_machine.name,\n lb_rule.id\n ))\n lb_rule.assign(self.apiclient, [self.virtual_machine])\n\n lb_rules = list_lb_rules(\n self.apiclient,\n id=lb_rule.id\n )\n self.assertEqual(\n isinstance(lb_rules, list),\n True,\n \"Check list response returns a valid list\"\n )\n #verify listLoadBalancerRules lists the added load balancing rule\n self.assertNotEqual(\n len(lb_rules),\n 0,\n \"Check Load Balancer Rule in its List\"\n )\n self.assertEqual(\n lb_rules[0].id,\n lb_rule.id,\n \"Check List Load Balancer Rules returns valid Rule\"\n )\n\n #Create Firewall rule with configurations from settings file\n fw_rule = FireWallRule.create(\n self.apiclient,\n ipaddressid=public_ip.ipaddress.id,\n protocol='TCP',\n cidrlist=[self.services[\"fw_rule\"][\"cidr\"]],\n startport=self.services[\"fw_rule\"][\"startport\"],\n endport=self.services[\"fw_rule\"][\"endport\"],\n projectid=self.project.id\n )\n self.debug(\"Created firewall rule: %s\" % fw_rule.id)\n\n # After Router start, FW rule should be in Active state\n fw_rules = FireWallRule.list(\n self.apiclient,\n id=fw_rule.id,\n )\n self.assertEqual(\n isinstance(fw_rules, list),\n True,\n \"Check for list FW rules response return valid data\"\n )\n\n self.assertEqual(\n fw_rules[0].state,\n 'Active',\n \"Check list load balancing rules\"\n )\n self.assertEqual(\n fw_rules[0].startport,\n self.services[\"fw_rule\"][\"startport\"],\n \"Check start port of firewall rule\"\n )\n\n self.assertEqual(\n fw_rules[0].endport,\n self.services[\"fw_rule\"][\"endport\"],\n \"Check end port of firewall rule\"\n )\n\n self.debug(\"Deploying VM for account: %s\" % self.account.name)\n virtual_machine_1 = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n 
templateid=self.template.id,\n accountid=self.account.name,\n domainid=self.account.domainid,\n serviceofferingid=self.service_offering.id,\n )\n self.cleanup.append(virtual_machine_1)\n\n self.debug(\"VM state after deploy: %s\" % virtual_machine_1.state)\n # Verify VM state\n self.assertEqual(\n virtual_machine_1.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n self.debug(\"Creating NAT rule for VM (ID: %s) outside project\" %\n virtual_machine_1.id)\n with self.assertRaises(Exception):\n NATRule.create(\n self.apiclient,\n virtual_machine_1,\n self.services[\"natrule\"],\n public_ip.ipaddress.id,\n )\n\n self.debug(\"Creating LB rule for public IP: %s outside project\" %\n public_ip.ipaddress)\n with self.assertRaises(Exception):\n LoadBalancerRule.create(\n self.apiclient,\n self.services[\"lbrule\"],\n public_ip.ipaddress.id,\n accountid=self.account.name\n )\n return", "def subnet(template, name, vpc, availability_zone='eu-west-1a', cidr='10.0.36.0/24', gateway=None, nat=None,\n map_public_ip=False, acl_table=None):\n s = Subnet(name, template=template)\n s.Tags = Tags(Name=aws_name(s.title))\n s.VpcId = Ref(vpc)\n s.CidrBlock = cidr\n s.MapPublicIpOnLaunch = map_public_ip\n\n if availability_zone:\n s.AvailabilityZone = Ref(availability_zone)\n\n if gateway and nat:\n raise(RuntimeError(\"Don't provide an internet gateway (public) and nat gateway (private) at the same time.\"))\n\n # add public route if an internet gateway is given\n if gateway:\n # route table\n rt = RouteTable('{}RouteTable'.format(name), template=template)\n rt.Tags = Tags(Name=aws_name(rt.title))\n rt.VpcId = Ref(vpc)\n\n # route\n r = Route('{}Route'.format(name), template=template)\n r.DestinationCidrBlock = '0.0.0.0/0'\n r.GatewayId = Ref(gateway)\n # r.DependsOn = InternetGatewayAttachment.title\n r.RouteTableId = Ref(rt)\n\n # associate\n SubnetRouteTableAssociation('{}SubnetRouteTableAssociation'.format(name), template=template,\n RouteTableId=Ref(rt), SubnetId=Ref(s))\n\n # add nat route if an nat gateway is given\n if nat:\n # route table\n rt = RouteTable('{}RouteTable'.format(name), template=template)\n rt.Tags = Tags(Name=aws_name(rt.title))\n rt.VpcId = Ref(vpc)\n\n # route\n r = Route('{}Route'.format(name), template=template)\n r.DestinationCidrBlock = '0.0.0.0/0'\n r.NatGatewayId = Ref(nat)\n # r.DependsOn = InternetGatewayAttachment.title\n r.RouteTableId = Ref(rt)\n\n # associate\n SubnetRouteTableAssociation('{}SubnetRouteTableAssociation'.format(name), template=template,\n RouteTableId=Ref(rt), SubnetId=Ref(s))\n\n # add acl table if one is provided. 
Defaults to vpc default acl if None is provided\n if acl_table:\n at = SubnetNetworkAclAssociation('{}SubnetAclTableAssociation'.format(name), template=template)\n at.SubnetId = Ref(s)\n at.NetworkAclId = Ref(acl_table)\n\n return s", "def create_network(client, overwrite_net=False, network_name=DOCK_NETWORK_NAME, subnetwork=DOCK_NETWORK_SUBNET,\n gw=DOCK_NETWORK_GW):\n\n if overwrite_net:\n try:\n client.networks.get(network_name).remove()\n logging.info(\" Overwriting existing network\")\n except docker.errors.APIError:\n logging.info(\" Warning: Couldn't find network to overwrite (does it exist?)\")\n\n ipam_pool = docker.types.IPAMPool(subnet=subnetwork, gateway=gw)\n ipam_config = docker.types.IPAMConfig(pool_configs=[ipam_pool])\n client.networks.create(network_name, driver=\"bridge\", ipam=ipam_config)", "def post_delete_subnet(self, sender, instance, **kwargs):\n RecurseNetworks.delete_entries(subnet=str(instance.ip_network), net_name=instance.name)", "def test_deploy_instance_with_new_network_and_sec_group(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_sec_group_\" + suffix\n sec_group_name = TEST_SEC_GROUP_PREFIX + \"_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 249\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr,\n sec_group_name=sec_group_name)", "def reset_network(self, instance):\n LOG.debug(\"reset_network\")\n return", "def remove_ip(enode, portlbl, addr, shell=None):\n assert portlbl\n assert ip_interface(addr)\n port = enode.ports[portlbl]\n\n cmd = 'ip addr del {addr} dev {port}'.format(addr=addr, port=port)\n response = enode(cmd, shell=shell)\n assert not response", "def post_instance_ip_update(self, resource_id, resource_dict):\n pass", "def post_virtual_network_delete(self, resource_id, resource_dict):\n pass", "def update_network(**kwargs):\n\n ip_addr = kwargs.get('ip_addr')\n is_private = kwargs.get('is_private')\n name = kwargs.get('name')\n dns_names = kwargs.get('dns_names')\n is_scanning = kwargs.get('is_scanning', False)\n network_id = make_shortuuid(name)\n\n network = {\n 'dns_names': dns_names,\n 'ip_addr': ip_addr,\n 'is_private' : is_private,\n 'name': name,\n 'id': network_id,\n 'is_scanning': is_scanning,\n 'updated_count': 0\n\n }\n\n network_exists = r.table(\"networks\").insert([network], conflict=\"update\")\n\n return network_exists.run(conn)", "def migrate_contract(network):\n print(network)", "def detach_public_ip(self, name=None, ip=None):\n raise NotImplementedError", "def sgup(sg=\"sg_external_ssh\"):\n ip = os.popen(\"/usr/bin/curl ifconfig.co 2>/dev/null\").readline().strip()\n print(\"My Public IP is : \"+ip)\n client = boto3.client(\"ec2\")\n ippermissions = client.describe_security_groups(GroupNames = [ sg ])[\"SecurityGroups\"][0][\"IpPermissions\"]\n print(\"Revoking old IP from group \"+sg)\n client.revoke_security_group_ingress(GroupName = sg, IpPermissions = ippermissions)\n printr(\"Adding new IP to group \"+sg)\n client.authorize_security_group_ingress(GroupName=sg, IpProtocol=\"-1\", FromPort=0, ToPort=0, CidrIp=ip+\"/32\")", "def delete_public_ip(self, ip=None):\n raise NotImplementedError", "def post_virtual_ip_delete(self, resource_id, resource_dict):\n pass", "def add_fixed_ip_to_instance(self, context, instance_id, host, network_id):\n args = {'instance_id': instance_id,\n 'host': host,\n 'network_id': network_id}\n rpc.cast(context, 
FLAGS.network_topic,\n {'method': 'add_fixed_ip_to_instance',\n 'args': args})", "def post_subnet_delete(self, resource_id, resource_dict):\n pass", "def subnet_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_subnet(**kwargs)", "def deallocate_for_instance(self, context, instance, **kwargs):\n args = kwargs\n args['instance_id'] = instance['id']\n args['project_id'] = instance['project_id']\n rpc.cast(context, FLAGS.network_topic,\n {'method': 'deallocate_for_instance',\n 'args': args})", "def test_vpc_to_subnets(neo4j_session):\n _ensure_local_neo4j_has_test_vpc_data(neo4j_session)\n _ensure_local_neo4j_has_test_subnet_data(neo4j_session)\n query = \"\"\"\n MATCH(vpc:GCPVpc{id:$VpcId})-[:RESOURCE]->(subnet:GCPSubnet)\n RETURN vpc.id, subnet.id, subnet.region, subnet.gateway_address, subnet.ip_cidr_range,\n subnet.private_ip_google_access\n \"\"\"\n expected_vpc_id = 'projects/project-abc/global/networks/default'\n nodes = neo4j_session.run(\n query,\n VpcId=expected_vpc_id,\n )\n actual_nodes = {\n (\n n['vpc.id'],\n n['subnet.id'],\n n['subnet.region'],\n n['subnet.gateway_address'],\n n['subnet.ip_cidr_range'],\n n['subnet.private_ip_google_access'],\n ) for n in nodes\n }\n\n expected_nodes = {\n (\n 'projects/project-abc/global/networks/default',\n 'projects/project-abc/regions/europe-west2/subnetworks/default',\n 'europe-west2',\n '10.0.0.1',\n '10.0.0.0/20',\n False,\n ),\n }\n assert actual_nodes == expected_nodes", "def test_nic_to_subnets(neo4j_session):\n _ensure_local_neo4j_has_test_subnet_data(neo4j_session)\n _ensure_local_neo4j_has_test_instance_data(neo4j_session)\n subnet_query = \"\"\"\n MATCH (nic:GCPNetworkInterface{id:$NicId})-[:PART_OF_SUBNET]->(subnet:GCPSubnet)\n return nic.nic_id, nic.private_ip, subnet.id, subnet.gateway_address, subnet.ip_cidr_range\n \"\"\"\n nodes = neo4j_session.run(\n subnet_query,\n NicId='projects/project-abc/zones/europe-west2-b/instances/instance-1-test/networkinterfaces/nic0',\n )\n actual_nodes = {\n (\n n['nic.nic_id'],\n n['nic.private_ip'],\n n['subnet.id'],\n n['subnet.gateway_address'],\n n['subnet.ip_cidr_range'],\n ) for n in nodes\n }\n expected_nodes = {(\n 'projects/project-abc/zones/europe-west2-b/instances/instance-1-test/networkinterfaces/nic0',\n '10.0.0.3',\n 'projects/project-abc/regions/europe-west2/subnetworks/default',\n '10.0.0.1',\n '10.0.0.0/20',\n )}\n assert actual_nodes == expected_nodes", "def test_deploy_instance_with_new_network_and_metadata(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_metadata_\" + suffix\n instance_meta = {\"test_item\": \"test_value\"}\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 251\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr,\n metadata=instance_meta)", "def amazon_public_address() -> str:\n check_url = 'http://169.254.169.254/latest/meta-data/public-ipv4'\n check_timeout = float(CONFIG['network']['check_timeout'])\n try:\n with urllib.request.urlopen(\n url=check_url, timeout=check_timeout,\n ) as response:\n return response.read().decode().strip()\n except Exception as error:\n return None", "def test_patch_namespaced_egress_network_policy(self):\n pass", "async def release_node_private_network_address_async(\n self,\n request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,\n ) -> 
dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:\n runtime = util_models.RuntimeOptions()\n return await self.release_node_private_network_address_with_options_async(request, runtime)", "def create_network(\n self, is_internal: bool = True\n ) -> None:\n if self.network:\n self.log.warn(f\"Network {self.network_name} was already created!\")\n return\n\n existing_networks = self.docker.networks.list(\n names=[self.network_name]\n )\n if existing_networks:\n if len(existing_networks) > 1:\n self.log.error(\n f\"Found multiple ({len(existing_networks)}) existing \"\n f\"networks {self.network_name}. Please delete all or all \"\n \"but one before starting the server!\")\n exit(1)\n self.log.info(f\"Network {self.network_name} already exists! Using \"\n \"existing network\")\n self.network = existing_networks[0]\n self.network.reload() # required to initialize containers in netw\n else:\n self.network = self.docker.networks.create(\n self.network_name,\n driver=\"bridge\",\n internal=is_internal,\n scope=\"local\",\n )", "def pre_instance_ip_delete(self, resource_id):\n pass", "def disassociate_address(self, public_ip=None, association_id=None):\r\n params = {}\r\n\r\n if public_ip is not None:\r\n params['PublicIp'] = public_ip\r\n elif association_id is not None:\r\n params['AssociationId'] = association_id\r\n\r\n return self.get_status('DisassociateAddress', params, verb='POST')", "def create_network(region_name, vpc_cidr, tag_prefix,\n tls_priv_key=None, tls_fullchain_cert=None,\n ssh_key_name=None, ssh_key_content=None, sally_ip=None,\n s3_logs_bucket=None, s3_identities_bucket=None,\n storage_enckey=None,\n dry_run=False):\n sg_tag_prefix = tag_prefix\n\n LOGGER.info(\"Provisions network ...\")\n ec2_client = boto3.client('ec2', region_name=region_name)\n\n # Create a VPC\n vpc_id, vpc_cidr_read = _get_vpc_id(tag_prefix, ec2_client=ec2_client,\n region_name=region_name)\n if vpc_id:\n if vpc_cidr != vpc_cidr_read:\n raise RuntimeError(\n \"%s cidr block for VPC is %s while it was expected to be %s\" %\n (tag_prefix, vpc_cidr_read, vpc_cidr))\n else:\n if not vpc_cidr:\n raise RuntimeError(\n \"%s could not find VPC and no cidr block is specified\"\\\n \" to create one.\" % tag_prefix)\n resp = ec2_client.create_vpc(\n DryRun=dry_run,\n CidrBlock=vpc_cidr,\n AmazonProvidedIpv6CidrBlock=False,\n InstanceTenancy='default')\n vpc_id = resp['Vpc']['VpcId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[vpc_id],\n Tags=[\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\", 'Value': \"%s-vpc\" % tag_prefix}])\n LOGGER.info(\"%s created VPC %s\", tag_prefix, vpc_id)\n\n # Create subnets for app, dbs and web services\n # ELB will require that there is at least one subnet per availability zones.\n # RDS will require that there is at least two subnets for databases.\n resp = ec2_client.describe_availability_zones()\n zones = {(zone['ZoneId'], zone['ZoneName'])\n for zone in resp['AvailabilityZones']}\n web_subnet_cidrs, dbs_subnet_cidrs, app_subnet_cidrs = _split_cidrs(\n vpc_cidr, zones=zones, region_name=region_name)\n\n LOGGER.info(\"%s provisioning web subnets...\", tag_prefix)\n web_zones = set([])\n web_subnet_by_cidrs = _get_subnet_by_cidrs(\n web_subnet_cidrs, tag_prefix, vpc_id=vpc_id, ec2_client=ec2_client)\n for cidr_block, subnet in web_subnet_by_cidrs.items():\n if subnet:\n web_zones |= {\n (subnet['AvailabilityZoneId'], subnet['AvailabilityZone'])}\n for cidr_block, subnet in web_subnet_by_cidrs.items():\n if not subnet:\n available_zones = zones - 
web_zones\n zone_id, zone_name = available_zones.pop()\n try:\n resp = ec2_client.create_subnet(\n AvailabilityZoneId=zone_id,\n CidrBlock=cidr_block,\n VpcId=vpc_id,\n TagSpecifications=[{\n 'ResourceType': 'subnet',\n 'Tags': [\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s %s web\" % (tag_prefix, zone_name)}]}],\n DryRun=dry_run)\n subnet = resp['Subnet']\n web_subnet_by_cidrs[cidr_block] = subnet\n web_zones |= set([(zone_id, zone_name)])\n subnet_id = subnet['SubnetId']\n LOGGER.info(\"%s created subnet %s in zone %s for cidr %s\",\n tag_prefix, subnet_id, zone_name, cidr_block)\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidSubnet.Conflict':\n raise\n # We have a conflict, let's just skip over it.\n LOGGER.warning(\n \"%s (skip) created subnet in zone %s because '%s'\",\n tag_prefix, zone_name, err)\n if subnet and not subnet['MapPublicIpOnLaunch']:\n subnet_id = subnet['SubnetId']\n if not dry_run:\n resp = ec2_client.modify_subnet_attribute(\n SubnetId=subnet_id,\n MapPublicIpOnLaunch={'Value': True})\n LOGGER.info(\"%s modify web subnet %s so instance can receive\"\\\n \" a public IP by default\", tag_prefix, subnet_id)\n\n LOGGER.info(\"%s provisioning dbs subnets...\", tag_prefix)\n dbs_zones = set([])\n dbs_subnet_by_cidrs = _get_subnet_by_cidrs(\n dbs_subnet_cidrs, tag_prefix, vpc_id=vpc_id, ec2_client=ec2_client)\n for cidr_block, subnet in dbs_subnet_by_cidrs.items():\n if subnet:\n dbs_zones |= {\n (subnet['AvailabilityZoneId'], subnet['AvailabilityZone'])}\n for cidr_block, subnet in dbs_subnet_by_cidrs.items():\n if not subnet:\n available_zones = zones - dbs_zones\n zone_id, zone_name = available_zones.pop()\n resp = ec2_client.create_subnet(\n AvailabilityZoneId=zone_id,\n CidrBlock=cidr_block,\n VpcId=vpc_id,\n TagSpecifications=[{\n 'ResourceType': 'subnet',\n 'Tags': [\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s %s dbs\" % (tag_prefix, zone_name)}]}],\n DryRun=dry_run)\n subnet = resp['Subnet']\n dbs_subnet_by_cidrs[cidr_block] = subnet\n dbs_zones |= set([(zone_id, zone_name)])\n subnet_id = subnet['SubnetId']\n LOGGER.info(\"%s created subnet %s in zone %s for cidr %s\",\n tag_prefix, subnet_id, zone_name, cidr_block)\n if subnet['MapPublicIpOnLaunch']:\n subnet_id = subnet['SubnetId']\n if not dry_run:\n resp = ec2_client.modify_subnet_attribute(\n SubnetId=subnet_id,\n MapPublicIpOnLaunch={'Value': False})\n LOGGER.info(\"%s modify dbs subnet %s so instance do not receive\"\\\n \" a public IP by default\", tag_prefix, subnet_id)\n\n LOGGER.info(\"%s provisioning apps subnets...\", tag_prefix)\n app_zones = set([])\n app_subnet_by_cidrs = _get_subnet_by_cidrs(\n app_subnet_cidrs, tag_prefix, vpc_id=vpc_id, ec2_client=ec2_client)\n for cidr_block, subnet in app_subnet_by_cidrs.items():\n if subnet:\n app_zones |= {\n (subnet['AvailabilityZoneId'], subnet['AvailabilityZone'])}\n for cidr_block, subnet in app_subnet_by_cidrs.items():\n if not subnet:\n available_zones = zones - app_zones\n zone_id, zone_name = available_zones.pop()\n resp = ec2_client.create_subnet(\n AvailabilityZoneId=zone_id,\n CidrBlock=cidr_block,\n VpcId=vpc_id,\n # COMMIT MSG:\n # this requires boto3>=1.14, using `createTag` might fail\n # because the subnet is not fully created yet.\n TagSpecifications=[{\n 'ResourceType': 'subnet',\n 'Tags': [\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s %s app\" % (tag_prefix, 
zone_name)}]}],\n DryRun=dry_run)\n subnet = resp['Subnet']\n app_subnet_by_cidrs[cidr_block] = subnet\n app_zones |= set([(zone_id, zone_name)])\n subnet_id = subnet['SubnetId']\n LOGGER.info(\"%s created subnet %s in %s for cidr %s\",\n tag_prefix, subnet_id, zone_name, cidr_block)\n if subnet['MapPublicIpOnLaunch']:\n subnet_id = subnet['SubnetId']\n if not dry_run:\n resp = ec2_client.modify_subnet_attribute(\n SubnetId=subnet_id,\n MapPublicIpOnLaunch={'Value': False})\n LOGGER.info(\"%s modify app subnet %s so instance do not receive\"\\\n \" a public IP by default\", tag_prefix, subnet_id)\n\n # Ensure that the VPC has an Internet Gateway.\n resp = ec2_client.describe_internet_gateways(\n Filters=[{'Name': 'attachment.vpc-id', 'Values': [vpc_id]}])\n if resp['InternetGateways']:\n igw_id = resp['InternetGateways'][0]['InternetGatewayId']\n LOGGER.info(\"%s found Internet Gateway %s\", tag_prefix, igw_id)\n else:\n resp = ec2_client.describe_internet_gateways(\n Filters=[{'Name': 'tag:Prefix', 'Values': [tag_prefix]}])\n if resp['InternetGateways']:\n igw_id = resp['InternetGateways'][0]['InternetGatewayId']\n LOGGER.info(\"%s found Internet Gateway %s\", tag_prefix, igw_id)\n else:\n resp = ec2_client.create_internet_gateway(DryRun=dry_run)\n igw_id = resp['InternetGateway']['InternetGatewayId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[igw_id],\n Tags=[{'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s internet gateway\" % tag_prefix}])\n LOGGER.info(\"%s created Internet Gateway %s\", tag_prefix, igw_id)\n resp = ec2_client.attach_internet_gateway(\n DryRun=dry_run,\n InternetGatewayId=igw_id,\n VpcId=vpc_id)\n\n # Create the NAT gateway by which private subnets connect to Internet\n # XXX Why do we have a Network interface eni-****?\n nat_elastic_ip = None\n web_elastic_ip = None\n resp = ec2_client.describe_addresses(\n Filters=[{'Name': 'tag:Prefix', 'Values': [tag_prefix]}])\n if resp['Addresses']:\n for resp_address in resp['Addresses']:\n for resp_tag in resp_address['Tags']:\n if resp_tag['Key'] == 'Name':\n if 'NAT gateway' in resp_tag['Value']:\n nat_elastic_ip = resp_address['AllocationId']\n break\n if 'Sally' in resp_tag['Value']:\n web_elastic_ip = resp_address['AllocationId']\n break\n\n if nat_elastic_ip:\n LOGGER.info(\"%s found NAT gateway public IP %s\",\n tag_prefix, nat_elastic_ip)\n else:\n resp = ec2_client.allocate_address(\n DryRun=dry_run,\n Domain='vpc')\n nat_elastic_ip = resp['AllocationId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[nat_elastic_ip],\n Tags=[{'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s NAT gateway public IP\" % tag_prefix}])\n LOGGER.info(\"%s created NAT gateway public IP %s\",\n tag_prefix, nat_elastic_ip)\n if web_elastic_ip:\n LOGGER.info(\"%s found Sally public IP %s\",\n tag_prefix, web_elastic_ip)\n else:\n resp = ec2_client.allocate_address(\n DryRun=dry_run,\n Domain='vpc')\n web_elastic_ip = resp['AllocationId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[web_elastic_ip],\n Tags=[{'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s Sally public IP\" % tag_prefix}])\n LOGGER.info(\"%s created Sally public IP %s\",\n tag_prefix, web_elastic_ip)\n\n # We have 2 EIP addresses. 
They need to be connected to machines\n # running in an Internet facing subnet.\n client_token = tag_prefix\n # XXX shouldn't it be the first web subnet instead?\n resp = ec2_client.describe_nat_gateways(Filters=[\n {'Name': \"vpc-id\", 'Values': [vpc_id]},\n {'Name': \"state\", 'Values': ['pending', 'available']}])\n if resp['NatGateways']:\n if len(resp['NatGateways']) > 1:\n LOGGER.warning(\"%s found more than one NAT gateway.\"\\\n \" Using first one in the list.\", tag_prefix)\n nat_gateway = resp['NatGateways'][0]\n nat_gateway_id = nat_gateway['NatGatewayId']\n nat_gateway_subnet_id = nat_gateway['SubnetId']\n LOGGER.info(\"%s found NAT gateway %s\", tag_prefix, nat_gateway_id)\n else:\n nat_gateway_subnet_id = next(web_subnet_by_cidrs.values())['SubnetId']\n resp = ec2_client.create_nat_gateway(\n AllocationId=nat_elastic_ip,\n ClientToken=client_token,\n SubnetId=nat_gateway_subnet_id)\n nat_gateway_id = resp['NatGateway']['NatGatewayId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[nat_gateway_id],\n Tags=[{'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s NAT gateway\" % tag_prefix}])\n LOGGER.info(\"%s created NAT gateway %s\",\n tag_prefix, nat_gateway_id)\n\n # Set up public and NAT-protected route tables\n resp = ec2_client.describe_route_tables(\n Filters=[{'Name': \"vpc-id\", 'Values': [vpc_id]}])\n public_route_table_id = None\n private_route_table_id = None\n for route_table in resp['RouteTables']:\n for route in route_table['Routes']:\n if 'GatewayId' in route and route['GatewayId'] == igw_id:\n public_route_table_id = route_table['RouteTableId']\n LOGGER.info(\"%s found public route table %s\",\n tag_prefix, public_route_table_id)\n break\n if ('NatGatewayId' in route and\n route['NatGatewayId'] == nat_gateway_id):\n private_route_table_id = route_table['RouteTableId']\n LOGGER.info(\"%s found private route table %s\",\n tag_prefix, private_route_table_id)\n\n if not public_route_table_id:\n resp = ec2_client.create_route_table(\n DryRun=dry_run,\n VpcId=vpc_id)\n public_route_table_id = resp['RouteTable']['RouteTableId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[public_route_table_id],\n Tags=[\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\", 'Value': \"%s public\" % tag_prefix}])\n LOGGER.info(\"%s created public subnet route table %s\",\n tag_prefix, public_route_table_id)\n resp = ec2_client.create_route(\n DryRun=dry_run,\n DestinationCidrBlock='0.0.0.0/0',\n GatewayId=igw_id,\n RouteTableId=public_route_table_id)\n\n if not private_route_table_id:\n resp = ec2_client.create_route_table(\n DryRun=dry_run,\n VpcId=vpc_id)\n private_route_table_id = resp['RouteTable']['RouteTableId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[private_route_table_id],\n Tags=[\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\", 'Value': \"%s internal\" % tag_prefix}])\n private_route_table_id = resp['RouteTable']['RouteTableId']\n LOGGER.info(\"%s created private route table %s\",\n tag_prefix, private_route_table_id)\n for _ in range(0, NB_RETRIES):\n # The NAT Gateway takes some time to be fully operational.\n try:\n resp = ec2_client.create_route(\n DryRun=dry_run,\n DestinationCidrBlock='0.0.0.0/0',\n NatGatewayId=nat_gateway_id,\n RouteTableId=private_route_table_id)\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidNatGatewayID.NotFound':\n raise\n time.sleep(RETRY_WAIT_DELAY)\n\n resp = 
ec2_client.describe_route_tables(\n DryRun=dry_run,\n RouteTableIds=[public_route_table_id])\n assocs = resp['RouteTables'][0]['Associations']\n if len(assocs) > 1:\n LOGGER.warning(\"%s found more than one route table association for\"\\\n \" public route table. Using first one in the list.\", tag_prefix)\n if not assocs[0]['Main']:\n LOGGER.warning(\"%s public route table is not the main one for the VPC.\",\n tag_prefix)\n\n for cidr_block, subnet in web_subnet_by_cidrs.items():\n if not subnet:\n # Maybe there was a conflict and we skipped this cidr_block.\n continue\n subnet_id = subnet['SubnetId']\n resp = ec2_client.describe_route_tables(\n DryRun=dry_run,\n Filters=[{\n 'Name': 'association.subnet-id',\n 'Values': [subnet_id]\n }])\n # The Main route table does not show as an explicit association.\n found_association = not bool(resp['RouteTables'])\n if found_association:\n LOGGER.info(\n \"%s found public route table %s associated to web subnet %s\",\n tag_prefix, public_route_table_id, subnet_id)\n else:\n resp = ec2_client.associate_route_table(\n DryRun=dry_run,\n RouteTableId=public_route_table_id,\n SubnetId=subnet_id)\n LOGGER.info(\n \"%s associate public route table %s to web subnet %s\",\n tag_prefix, public_route_table_id, subnet_id)\n\n for cidr_block, subnet in dbs_subnet_by_cidrs.items():\n subnet_id = subnet['SubnetId']\n resp = ec2_client.describe_route_tables(\n DryRun=dry_run,\n Filters=[{\n 'Name': 'association.subnet-id',\n 'Values': [subnet_id]\n }])\n # The Main route table does not show as an explicit association.\n found_association = False\n if resp['RouteTables']:\n found_association = (\n resp['RouteTables'][0]['Associations'][0]['RouteTableId'] ==\n private_route_table_id\n )\n if found_association:\n LOGGER.info(\n \"%s found private route table %s associated to dbs subnet %s\",\n tag_prefix, private_route_table_id, subnet_id)\n else:\n resp = ec2_client.associate_route_table(\n DryRun=dry_run,\n RouteTableId=private_route_table_id,\n SubnetId=subnet_id)\n LOGGER.info(\n \"%s associate private route table %s to dbs subnet %s\",\n tag_prefix, private_route_table_id, subnet_id)\n\n for cidr_block, subnet in app_subnet_by_cidrs.items():\n subnet_id = subnet['SubnetId']\n resp = ec2_client.describe_route_tables(\n DryRun=dry_run,\n Filters=[{\n 'Name': 'association.subnet-id',\n 'Values': [subnet_id]\n }])\n # The Main route table does not show as an explicit association.\n found_association = False\n if resp['RouteTables']:\n found_association = (\n resp['RouteTables'][0]['Associations'][0]['RouteTableId'] ==\n private_route_table_id\n )\n if found_association:\n LOGGER.info(\n \"%s found private route table %s associated to app subnet %s\",\n tag_prefix, private_route_table_id, subnet_id)\n else:\n resp = ec2_client.associate_route_table(\n DryRun=dry_run,\n RouteTableId=private_route_table_id,\n SubnetId=subnet_id)\n LOGGER.info(\n \"%s associate private route table %s to app subnet %s\",\n tag_prefix, private_route_table_id, subnet_id)\n\n # Create the ELB, proxies and databases security groups\n # The app security group (as the instance role) will be specific\n # to the application.\n #pylint:disable=unbalanced-tuple-unpacking\n moat_name, vault_name, gate_name, kitchen_door_name = \\\n _get_security_group_names([\n 'moat', 'vault', 'castle-gate', 'kitchen-door'],\n tag_prefix=sg_tag_prefix)\n moat_sg_id, vault_sg_id, gate_sg_id, kitchen_door_sg_id = \\\n _get_security_group_ids(\n [moat_name, vault_name, gate_name, kitchen_door_name],\n tag_prefix, 
vpc_id=vpc_id, ec2_client=ec2_client)\n\n update_moat_rules = (not moat_sg_id)\n update_gate_rules = (not gate_sg_id)\n update_vault_rules = (not vault_sg_id)\n update_kitchen_door_rules = (not kitchen_door_sg_id)\n\n if not moat_sg_id:\n resp = ec2_client.create_security_group(\n Description='%s ELB' % tag_prefix,\n GroupName=moat_name,\n VpcId=vpc_id,\n DryRun=dry_run)\n moat_sg_id = resp['GroupId']\n LOGGER.info(\"%s created %s security group %s\",\n tag_prefix, moat_name, moat_sg_id)\n if not gate_sg_id:\n resp = ec2_client.create_security_group(\n Description='%s session managers' % tag_prefix,\n GroupName=gate_name,\n VpcId=vpc_id,\n DryRun=dry_run)\n gate_sg_id = resp['GroupId']\n LOGGER.info(\"%s created %s security group %s\",\n tag_prefix, gate_name, gate_sg_id)\n if not vault_sg_id:\n resp = ec2_client.create_security_group(\n Description='%s databases' % tag_prefix,\n GroupName=vault_name,\n VpcId=vpc_id,\n DryRun=dry_run)\n vault_sg_id = resp['GroupId']\n LOGGER.info(\"%s created %s security group %s\",\n tag_prefix, vault_name, vault_sg_id)\n # kitchen_door_sg_id: Kitchen door security group is created later on\n # if we have ssh keys.\n\n resp = ec2_client.describe_security_groups(\n DryRun=dry_run,\n GroupIds=[moat_sg_id, vault_sg_id, gate_sg_id])\n for security_group in resp['SecurityGroups']:\n if security_group['GroupId'] == moat_sg_id:\n # moat rules\n LOGGER.info(\"%s check ingress rules for %s\", tag_prefix, moat_name)\n check_security_group_ingress(security_group, expected_rules=[\n {'port': 80, 'source': '0.0.0.0/0'},\n {'port': 80, 'source': '::/0'},\n {'port': 443, 'source': '0.0.0.0/0'},\n {'port': 443, 'source': '::/0'},\n ],\n tag_prefix=tag_prefix)\n elif security_group['GroupId'] == gate_sg_id:\n # castle-gate rules\n LOGGER.info(\"%s check ingress rules for %s\", tag_prefix, gate_name)\n check_security_group_ingress(security_group, expected_rules=[\n {'port': 80, 'source': moat_sg_id},\n {'port': 443, 'source': moat_sg_id}\n ],\n tag_prefix=tag_prefix)\n elif security_group['GroupId'] == vault_sg_id:\n # vault rules\n LOGGER.info(\"%s check ingress rules for %s\", tag_prefix, vault_name)\n check_security_group_ingress(security_group, expected_rules=[\n {'port': 5432, 'source': gate_sg_id}\n ],\n tag_prefix=tag_prefix)\n\n # moat allow rules\n if update_moat_rules:\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=moat_sg_id,\n IpPermissions=[{\n 'FromPort': 80,\n 'IpProtocol': 'tcp',\n 'IpRanges': [{\n 'CidrIp': '0.0.0.0/0'\n }],\n 'ToPort': 80\n }, {\n 'FromPort': 80,\n 'IpProtocol': 'tcp',\n 'Ipv6Ranges': [{\n 'CidrIpv6': '::/0',\n }],\n 'ToPort': 80\n }, {\n 'FromPort': 443,\n 'IpProtocol': 'tcp',\n 'IpRanges': [{\n 'CidrIp': '0.0.0.0/0'\n }],\n 'ToPort': 443\n }, {\n 'FromPort': 443,\n 'IpProtocol': 'tcp',\n 'Ipv6Ranges': [{\n 'CidrIpv6': '::/0',\n }],\n 'ToPort': 443\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n\n if update_gate_rules:\n # castle-gate allow rules\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=gate_sg_id,\n IpPermissions=[{\n 'IpProtocol': 'tcp',\n 'FromPort': 80,\n 'ToPort': 80,\n 'UserIdGroupPairs': [{'GroupId': moat_sg_id}]\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = 
ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=gate_sg_id,\n IpPermissions=[{\n 'IpProtocol': 'tcp',\n 'FromPort': 443,\n 'ToPort': 443,\n 'UserIdGroupPairs': [{'GroupId': moat_sg_id}]\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = ec2_client.authorize_security_group_egress(\n DryRun=dry_run,\n GroupId=gate_sg_id,\n IpPermissions=[{\n 'IpProtocol': '-1',\n 'IpRanges': [{\n 'CidrIp': '0.0.0.0/0',\n }]}])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n # vault allow rules\n if update_vault_rules:\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=vault_sg_id,\n IpPermissions=[{\n 'IpProtocol': 'tcp',\n 'FromPort': 5432,\n 'ToPort': 5432,\n 'UserIdGroupPairs': [{'GroupId': gate_sg_id}]\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = ec2_client.authorize_security_group_egress(\n DryRun=dry_run,\n GroupId=vault_sg_id,\n IpPermissions=[{\n 'IpProtocol': '-1',\n 'IpRanges': [{\n 'CidrIp': '0.0.0.0/0',\n }]}])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n\n # Create uploads and logs S3 buckets\n # XXX create the identities bucket?\n # XXX need to force private.\n if not s3_identities_bucket:\n s3_identities_bucket = '%s-identities' % tag_prefix\n s3_uploads_bucket = tag_prefix\n s3_client = boto3.client('s3')\n if s3_logs_bucket:\n try:\n resp = s3_client.create_bucket(\n ACL='private',\n Bucket=s3_logs_bucket,\n CreateBucketConfiguration={\n 'LocationConstraint': region_name\n })\n LOGGER.info(\"%s created S3 bucket for logs %s\",\n tag_prefix, s3_logs_bucket)\n except botocore.exceptions.ClientError as err:\n LOGGER.info(\"%s found S3 bucket for logs %s\",\n tag_prefix, s3_logs_bucket)\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'BucketAlreadyOwnedByYou':\n raise\n # Apply bucket encryption by default\n found_encryption = False\n try:\n resp = s3_client.get_bucket_encryption(\n Bucket=s3_logs_bucket)\n if resp['ServerSideEncryptionConfiguration']['Rules'][0][\n 'ApplyServerSideEncryptionByDefault'][\n 'SSEAlgorithm'] == 'AES256':\n found_encryption = True\n LOGGER.info(\"%s found encryption AES256 enabled on %s bucket\",\n tag_prefix, s3_logs_bucket)\n except botocore.exceptions.ClientError as err:\n LOGGER.info(\"%s found S3 bucket for logs %s\",\n tag_prefix, s3_logs_bucket)\n if not err.response.get('Error', {}).get('Code', 'Unknown') == \\\n 'ServerSideEncryptionConfigurationNotFoundError':\n raise\n if not found_encryption:\n s3_client.put_bucket_encryption(\n Bucket=s3_logs_bucket,\n ServerSideEncryptionConfiguration={\n 'Rules': [{\n 'ApplyServerSideEncryptionByDefault': {\n 'SSEAlgorithm': 'AES256',\n }\n }]\n })\n LOGGER.info(\"%s enable encryption on %s bucket\",\n tag_prefix, s3_logs_bucket)\n\n # Set versioning and lifecycle policies\n resp = s3_client.get_bucket_versioning(\n Bucket=s3_logs_bucket)\n if 'Status' in resp and resp['Status'] == 'Enabled':\n LOGGER.info(\"%s found versioning enabled on %s bucket\",\n tag_prefix, s3_logs_bucket)\n else:\n s3_client.put_bucket_versioning(\n Bucket=s3_logs_bucket,\n 
VersioningConfiguration={\n 'MFADelete': 'Disabled',\n 'Status': 'Enabled'\n })\n LOGGER.info(\"%s enable versioning on %s bucket\",\n tag_prefix, s3_logs_bucket)\n found_policy = False\n #pylint:disable=too-many-nested-blocks\n try:\n resp = s3_client.get_bucket_lifecycle_configuration(\n Bucket=s3_logs_bucket)\n for rule in resp['Rules']:\n if rule['Status'] == 'Enabled':\n found_rule = True\n for transition in rule['Transitions']:\n if transition['StorageClass'] == 'GLACIER':\n if transition.get('Days', 0) < 90:\n found_rule = False\n LOGGER.warning(\"%s lifecycle for 'GLACIER'\"\\\n \" is less than 90 days.\", tag_prefix)\n break\n if rule['Expiration'].get('Days', 0) < 365:\n found_rule = False\n LOGGER.warning(\n \"%s lifecycle expiration is less than 365 days.\",\n tag_prefix)\n for transition in rule['NoncurrentVersionTransitions']:\n if transition['StorageClass'] == 'GLACIER':\n if transition.get('NoncurrentDays', 0) < 90:\n found_rule = False\n LOGGER.warning(\n \"%s version lifecycle for 'GLACIER'\"\\\n \" is less than 90 days.\", tag_prefix)\n break\n if rule['NoncurrentVersionExpiration'].get(\n 'NoncurrentDays', 0) < 365:\n found_rule = False\n LOGGER.warning(\"%s lifecycle version expiration is\"\\\n \" less than 365 days.\", tag_prefix)\n if found_rule:\n found_policy = True\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'NoSuchLifecycleConfiguration':\n raise\n if found_policy:\n LOGGER.info(\"%s found lifecycle policy on %s bucket\",\n tag_prefix, s3_logs_bucket)\n else:\n s3_client.put_bucket_lifecycle_configuration(\n Bucket=s3_logs_bucket,\n LifecycleConfiguration={\n \"Rules\": [{\n \"Status\": \"Enabled\",\n \"ID\": \"expire-logs\",\n \"Filter\": {\n \"Prefix\": \"\", # This is required.\n },\n \"Transitions\": [{\n \"Days\": 90,\n \"StorageClass\": \"GLACIER\"\n }],\n \"Expiration\" : {\n \"Days\": 365\n },\n \"NoncurrentVersionTransitions\": [{\n \"NoncurrentDays\": 90,\n \"StorageClass\": \"GLACIER\"\n }],\n 'NoncurrentVersionExpiration': {\n 'NoncurrentDays': 365\n },\n }]})\n LOGGER.info(\"%s update lifecycle policy on %s bucket\",\n tag_prefix, s3_logs_bucket)\n\n # https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-access-logs.html#attach-bucket-policy\n elb_account_ids_per_region = {\n 'us-east-1': '127311923021',\n 'us-east-2': '033677994240',\n 'us-west-1': '027434742980',\n 'us-west-2': '797873946194',\n 'af-south-1': '098369216593',\n 'ca-central-1': '985666609251',\n 'eu-central-1': '054676820928',\n 'eu-west-1': '156460612806',\n 'eu-west-2': '652711504416',\n 'eu-south-1': '635631232127',\n 'eu-west-3': '009996457667',\n 'eu-north-1': '897822967062',\n 'ap-east-1': '754344448648',\n 'ap-northeast-1': '582318560864',\n 'ap-northeast-2': '600734575887',\n 'ap-northeast-3': '383597477331',\n 'ap-southeast-1': '114774131450',\n 'ap-southeast-2': '783225319266',\n 'ap-south-1': '718504428378',\n 'me-south-1': '076674570225',\n 'sa-east-1': '507241528517'\n }\n elb_account_id = elb_account_ids_per_region[region_name]\n s3_client.put_bucket_policy(\n Bucket=s3_logs_bucket,\n Policy=json.dumps({\n \"Version\": \"2008-10-17\",\n \"Id\": \"WriteLogs\",\n \"Statement\": [{\n # billing reports\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"billingreports.amazonaws.com\"\n },\n \"Action\": [\n \"s3:GetBucketAcl\",\n \"s3:GetBucketPolicy\"\n ],\n \"Resource\": \"arn:aws:s3:::%s\" % s3_logs_bucket\n }, {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": 
\"billingreports.amazonaws.com\"\n },\n \"Action\": \"s3:PutObject\",\n \"Resource\": \"arn:aws:s3:::%s/*\" % s3_logs_bucket\n }, {\n # ELB access logs\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"AWS\": \"arn:aws:iam::%s:root\" % elb_account_id\n },\n \"Action\": \"s3:PutObject\",\n \"Resource\":\n \"arn:aws:s3:::%s/var/log/elb/*\" % s3_logs_bucket\n }, {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"delivery.logs.amazonaws.com\"\n },\n \"Action\": \"s3:PutObject\",\n \"Resource\":\n (\"arn:aws:s3:::%s/var/log/elb/*\" % s3_logs_bucket),\n \"Condition\": {\n \"StringEquals\": {\n \"s3:x-amz-acl\": \"bucket-owner-full-control\"\n }\n }\n }, {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"delivery.logs.amazonaws.com\"\n },\n \"Action\": \"s3:GetBucketAcl\",\n \"Resource\": \"arn:aws:s3:::%s\" % s3_logs_bucket\n }]\n }))\n\n if s3_uploads_bucket:\n try:\n resp = s3_client.create_bucket(\n ACL='private',\n Bucket=s3_uploads_bucket,\n CreateBucketConfiguration={\n 'LocationConstraint': region_name\n })\n LOGGER.info(\"%s created S3 bucket for uploads %s\",\n tag_prefix, s3_uploads_bucket)\n except botocore.exceptions.ClientError as err:\n LOGGER.info(\"%s found S3 bucket for uploads %s\",\n tag_prefix, s3_uploads_bucket)\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'BucketAlreadyOwnedByYou':\n raise\n\n # Create instance profiles ...\n iam_client = boto3.client('iam')\n # ... for webfront instances\n create_instance_profile(\n create_gate_role(gate_name,\n s3_logs_bucket=s3_logs_bucket, s3_uploads_bucket=s3_uploads_bucket,\n iam_client=iam_client, tag_prefix=tag_prefix),\n iam_client=iam_client, region_name=region_name,\n tag_prefix=tag_prefix, dry_run=dry_run)\n # ... for databases instances\n create_instance_profile(\n create_vault_role(vault_name,\n s3_logs_bucket=s3_logs_bucket, s3_uploads_bucket=s3_uploads_bucket,\n iam_client=iam_client, tag_prefix=tag_prefix),\n iam_client=iam_client, region_name=region_name,\n tag_prefix=tag_prefix, dry_run=dry_run)\n\n if ssh_key_name:\n if not ssh_key_content:\n ssh_key_path = os.path.join(os.getenv('HOME'),\n '.ssh', '%s.pub' % ssh_key_name)\n if os.path.exists(ssh_key_path):\n with open(ssh_key_path, 'rb') as ssh_key_obj:\n ssh_key_content = ssh_key_obj.read()\n else:\n LOGGER.warning(\"%s no content for SSH key %s\",\n tag_prefix, ssh_key_name)\n # import SSH keys\n try:\n resp = ec2_client.import_key_pair(\n DryRun=dry_run,\n KeyName=ssh_key_name,\n PublicKeyMaterial=ssh_key_content)\n LOGGER.info(\"%s imported SSH key %s\", tag_prefix, ssh_key_name)\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidKeyPair.Duplicate':\n raise\n LOGGER.info(\"%s found SSH key %s\", tag_prefix, ssh_key_name)\n\n # ... 
for sally instances\n create_instance_profile(\n create_logs_role(kitchen_door_name,\n s3_logs_bucket=s3_logs_bucket,\n iam_client=iam_client, tag_prefix=tag_prefix),\n iam_client=iam_client, region_name=region_name,\n tag_prefix=tag_prefix, dry_run=dry_run)\n\n # allows SSH connection to instances for debugging\n update_kitchen_door_rules = (not kitchen_door_sg_id)\n if not kitchen_door_sg_id:\n resp = ec2_client.create_security_group(\n Description='%s SSH access' % tag_prefix,\n GroupName=kitchen_door_name,\n VpcId=vpc_id,\n DryRun=dry_run)\n kitchen_door_sg_id = resp['GroupId']\n LOGGER.info(\"%s created %s security group %s\",\n tag_prefix, kitchen_door_name, kitchen_door_sg_id)\n\n if update_kitchen_door_rules:\n try:\n if sally_ip:\n cidr_block = '%s/32' % sally_ip\n else:\n LOGGER.warning(\"no IP range was specified to restrict\"\\\n \" access to SSH port\")\n cidr_block = '0.0.0.0/0'\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=kitchen_door_sg_id,\n CidrIp=cidr_block,\n IpProtocol='tcp',\n FromPort=22,\n ToPort=22)\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = ec2_client.authorize_security_group_egress(\n DryRun=dry_run,\n GroupId=kitchen_door_sg_id,\n IpPermissions=[{\n 'IpProtocol': '-1',\n 'IpRanges': [{\n 'CidrIp': '0.0.0.0/0',\n }]}])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=gate_sg_id,\n IpPermissions=[{\n 'IpProtocol': 'tcp',\n 'FromPort': 22,\n 'ToPort': 22,\n 'UserIdGroupPairs': [{'GroupId': kitchen_door_sg_id}]\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=vault_sg_id,\n IpPermissions=[{\n 'IpProtocol': 'tcp',\n 'FromPort': 22,\n 'ToPort': 22,\n 'UserIdGroupPairs': [{'GroupId': kitchen_door_sg_id}]\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n\n # Creates encryption keys (KMS) in region\n if not storage_enckey:\n storage_enckey = _get_or_create_storage_enckey(\n region_name, tag_prefix, dry_run=dry_run)\n\n # Create an Application ELB and WAF\n load_balancer_arn = create_elb(\n tag_prefix, web_subnet_by_cidrs, moat_sg_id,\n s3_logs_bucket=s3_logs_bucket,\n tls_priv_key=tls_priv_key, tls_fullchain_cert=tls_fullchain_cert,\n region_name=region_name)\n create_waf(\n tag_prefix,\n elb_arn=load_balancer_arn,\n s3_logs_bucket=s3_logs_bucket,\n region_name=region_name,\n dry_run=dry_run)", "def fusion_api_delete_ethernet_network(self, name=None, uri=None, param='', api=None, headers=None):\n return self.ethernet_network.delete(name=name, uri=uri, param=param, api=api, headers=headers)", "def test_replace_cluster_network(self):\n pass", "def test_routing_ip_prefix_uninstall(self):\n self._common_uninstall_external_and_unintialized(\n 'some_id', routing_ip_prefix.delete,\n {'prefix': {}}\n )", "def network_create_end(self, payload):\n network_id = payload['network']['id']\n self.enable_dhcp_helper(network_id)", "def test_ipam_ip_addresses_delete(self):\n pass", "def mac_pool_remove(handle, name, 
parent_dn=\"org-root\"):\r\n dn = parent_dn + '/mac-pool-' + name\r\n mo = handle.query_dn(dn)\r\n if mo:\r\n handle.remove_mo(mo)\r\n handle.commit()\r\n else:\r\n raise ValueError(\"MAC Pool is not available\")", "def test_networking_project_network_delete(self):\n pass", "def release_floating_ip(self, context, address,\n affect_auto_assigned=False):\n floating_ip = self.db.floating_ip_get_by_address(context, address)\n if floating_ip['fixed_ip']:\n raise exception.ApiError(_('Floating ip is in use. '\n 'Disassociate it before releasing.'))\n if not affect_auto_assigned and floating_ip.get('auto_assigned'):\n return\n # NOTE(vish): We don't know which network host should get the ip\n # when we deallocate, so just send it to any one. This\n # will probably need to move into a network supervisor\n # at some point.\n rpc.cast(context,\n FLAGS.network_topic,\n {'method': 'deallocate_floating_ip',\n 'args': {'floating_address': floating_ip['address']}})", "def release(self, floating_ip_id):\n self.client.delete_floatingip(floating_ip_id)", "def test_delete_cluster_network(self):\n pass", "def network_delete_end(self, payload):\n self.disable_dhcp_helper(payload['network_id'])", "def delete_endpoint(EndpointName=None):\n pass", "def testPutNetworkLocalIp(self):\n models.System.objects.all().delete()\n self._saveSystem()\n old_count = models.Network.objects.count()\n self._put('inventory/networks/1/',\n data=testsxml.network_put_xml_opt_ip_addr % \"169.254.4.4\",\n username=\"admin\", password=\"password\")\n self.assertEquals(old_count, models.Network.objects.count())\n\n self._put('inventory/networks/1/',\n data=testsxml.network_put_xml_opt_ip_addr % \"4.4.4.4\",\n username=\"admin\", password=\"password\")\n self.assertEquals(old_count + 1, models.Network.objects.count())", "def create_public_ip(self):\n raise NotImplementedError", "def post_floating_ip_delete(self, resource_id, resource_dict):\n pass", "def post_subnet_create(self, resource_dict):\n pass", "def delete_network(self, network):\r\n return self.delete(self.network_path % (network))", "def destroy(self, network, device_name):\n if self.conf.use_namespaces:\n namespace = NS_PREFIX + network.id\n else:\n namespace = None\n\n self.driver.unplug(device_name, namespace=namespace)\n\n self.plugin.release_dhcp_port(network.id,\n self.get_device_id(network))", "def post_network_ipam_update(self, resource_id, resource_dict):\n pass", "def run(self, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.delete_network(network[\"id\"])", "def network_update_end(self, payload):\n network_id = payload['network']['id']\n if payload['network']['admin_state_up']:\n self.enable_dhcp_helper(network_id)\n else:\n self.disable_dhcp_helper(network_id)", "def subnet_create(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.create_subnet(**kwargs)", "def _disassociate_floating_ip(self, context, address, interface,\n instance_uuid):\n interface = CONF.public_interface or interface\n\n @utils.synchronized(six.text_type(address))\n def do_disassociate():\n # NOTE(vish): Note that we are disassociating in the db before we\n # actually remove the ip address on the host. We are\n # safe from races on this host due to the decorator,\n # but another host might grab the ip right away. 
We\n # don't worry about this case because the minuscule\n # window where the ip is on both hosts shouldn't cause\n # any problems.\n floating = objects.FloatingIP.disassociate(context, address)\n fixed = floating.fixed_ip\n if not fixed:\n # NOTE(vish): ip was already disassociated\n return\n if interface:\n # go go driver time\n self.l3driver.remove_floating_ip(address, fixed.address,\n interface, fixed.network)\n payload = dict(project_id=context.project_id,\n instance_id=instance_uuid,\n floating_ip=address)\n self.notifier.info(context,\n 'network.floating_ip.disassociate', payload)\n do_disassociate()", "def test_networking_project_network_tag_delete(self):\n pass", "def test_replace_namespaced_egress_network_policy(self):\n pass", "def delete_network(options, vsm_obj):\n print(\"Disconnecting edge interface attached to this network\")\n edge_id = get_edge(vsm_obj)\n edge = Edge(vsm_obj, '4.0')\n edge.id = edge_id\n vnics = Vnics(edge)\n vnics_schema = vnics.query()\n network = get_network_id(options, get_network_name_on_vc(options))\n for vnic in vnics_schema.vnics:\n if network and vnic.portgroupId == network:\n print(\"Found a matching vnic %s %s\" % (options.name, vnic.index))\n vnic.isConnected = \"False\"\n vnic.portgroupId = None\n vnic.name = \"vnic%s\" % vnic.index\n vnics_schema = VnicsSchema()\n vnics_schema.vnics = [vnic]\n result = vnics.create(vnics_schema)\n if (result[0].response.status != 204):\n print \"update vnic error: %s %s\" \\\n % (result[0].response.status, result[0].response.reason)\n return False\n else:\n break\n else:\n print (\"No matching vnic found\")\n\n vdn_scope = get_transport_zone(options)\n virtual_wire = VirtualWire(vdn_scope)\n vwire = virtual_wire.read_by_name(get_network_name(options))\n name = get_network_name(options)\n if vwire != \"FAILURE\":\n print(\"Found a matching network %s\" % (options.name))\n virtual_wire.id = vwire.objectId\n result = virtual_wire.delete()\n if (result.response.status != 200):\n print (\"Delete vwire error: %s\" % result.response.reason)\n return False\n else:\n print (\"No matching network found\")\n print(\"Network %s deleted\" % (options.name))\n\n return True", "def rm_network(c):\n print('Stopping local test network and removing containers')\n with c.cd('images'):\n c.run('sudo docker-compose down -v', hide='stderr')\n\n c.run('sudo rm -rf volumes/stellar-core/opt/stellar-core/buckets')\n c.run('sudo rm -f volumes/stellar-core/opt/stellar-core/*.log')\n c.run('sudo rm -rf volumes/stellar-core/tmp')", "def unplug_port_from_network(self, device_id, device_owner, hostname,\n port_id, network_id, tenant_id, sg, vnic_type,\n switch_bindings=None, segments=None):", "def launch(\n *,\n key_name: Optional[str],\n instance_type: str,\n ami: str,\n ami_user: str,\n tags: Dict[str, str],\n display_name: Optional[str] = None,\n size_gb: int,\n security_group_name: str,\n instance_profile: Optional[str],\n nonce: str,\n delete_after: datetime.datetime,\n) -> Instance:\n\n if display_name:\n tags[\"Name\"] = display_name\n tags[\"scratch-delete-after\"] = str(delete_after.timestamp())\n tags[\"nonce\"] = nonce\n tags[\"git_ref\"] = git.describe()\n tags[\"ami-user\"] = ami_user\n\n ec2 = boto3.client(\"ec2\")\n groups = ec2.describe_security_groups()\n security_group_id = None\n for group in groups[\"SecurityGroups\"]:\n if group[\"GroupName\"] == security_group_name:\n security_group_id = group[\"GroupId\"]\n break\n\n if security_group_id is None:\n vpcs = ec2.describe_vpcs()\n vpc_id = None\n for vpc in 
vpcs[\"Vpcs\"]:\n if vpc[\"IsDefault\"] == True:\n vpc_id = vpc[\"VpcId\"]\n break\n if vpc_id is None:\n default_vpc = ec2.create_default_vpc()\n vpc_id = default_vpc[\"Vpc\"][\"VpcId\"]\n securitygroup = ec2.create_security_group(\n GroupName=security_group_name,\n Description=\"Allows all.\",\n VpcId=vpc_id,\n )\n security_group_id = securitygroup[\"GroupId\"]\n ec2.authorize_security_group_ingress(\n GroupId=security_group_id,\n CidrIp=\"0.0.0.0/0\",\n IpProtocol=\"tcp\",\n FromPort=22,\n ToPort=22,\n )\n\n network_interface: InstanceNetworkInterfaceSpecificationTypeDef = {\n \"AssociatePublicIpAddress\": True,\n \"DeviceIndex\": 0,\n \"Groups\": [security_group_id],\n }\n\n say(f\"launching instance {display_name or '(unnamed)'}\")\n with open(ROOT / \"misc\" / \"scratch\" / \"provision.bash\") as f:\n provisioning_script = f.read()\n kwargs: RunInstancesRequestRequestTypeDef = {\n \"MinCount\": 1,\n \"MaxCount\": 1,\n \"ImageId\": ami,\n \"InstanceType\": cast(InstanceTypeType, instance_type),\n \"UserData\": provisioning_script,\n \"TagSpecifications\": [\n {\n \"ResourceType\": \"instance\",\n \"Tags\": [{\"Key\": k, \"Value\": v} for (k, v) in tags.items()],\n }\n ],\n \"NetworkInterfaces\": [network_interface],\n \"BlockDeviceMappings\": [\n {\n \"DeviceName\": \"/dev/sda1\",\n \"Ebs\": {\n \"VolumeSize\": size_gb,\n \"VolumeType\": \"gp3\",\n },\n }\n ],\n \"MetadataOptions\": {\n # Allow Docker containers to access IMDSv2.\n \"HttpPutResponseHopLimit\": 2,\n },\n }\n if key_name:\n kwargs[\"KeyName\"] = key_name\n if instance_profile:\n kwargs[\"IamInstanceProfile\"] = {\"Name\": instance_profile}\n i = boto3.resource(\"ec2\").create_instances(**kwargs)[0]\n\n return i", "def setNetGroup(addr): #status: Done, not tested\r\n pass", "def test_delete_collection_namespaced_egress_network_policy(self):\n pass", "def net_undefine(network, server, virt=\"Xen\"):\n\n cmd = \"virsh -c %s net-undefine %s 2>/dev/null\" % (virt2uri(virt), network)\n ret, out = run_remote(server, cmd)\n\n return ret", "def test_create_network_and_subnet(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 254\n self.__create_network_and_subnet_test_helper__(network_name, network_cidr)", "def network(self):\n address = unicode(\"%s/%s\" % (self.address, _get_cidr(self.netmask)))\n return IPv4Network(address, strict=False)", "def post_virtual_network_update(self, resource_id, resource_dict):\n pass", "def test_delete_namespaced_egress_network_policy(self):\n pass", "def test_delete_network(self):\n pass", "def unlink(address):", "def delete_network_profile(arn=None):\n pass", "def proxy(self):\n result = self.instances(role='stateless-body', format=\"PrivateIpAddress\")\n return result[0][0] if result else None", "def test_networking_project_network_update(self):\n pass", "def post_subnet_update(self, resource_id, resource_dict):\n pass", "def post_virtual_network_create(self, resource_dict):\n pass" ]
[ "0.60280126", "0.59217405", "0.55564255", "0.5512042", "0.5434306", "0.5430282", "0.5323362", "0.5282437", "0.52760875", "0.5187943", "0.5184374", "0.51586765", "0.51333106", "0.5116296", "0.509318", "0.50725955", "0.5023402", "0.49776056", "0.4968555", "0.49613032", "0.49407306", "0.48763958", "0.48752776", "0.48501366", "0.48401412", "0.48311952", "0.48208976", "0.4816002", "0.47860056", "0.47845626", "0.47660986", "0.47572783", "0.47503948", "0.47490373", "0.4740215", "0.47399968", "0.47322968", "0.47128332", "0.4700301", "0.4699432", "0.46937734", "0.4691572", "0.46877146", "0.468677", "0.46857974", "0.46705705", "0.46560082", "0.46551418", "0.46480092", "0.46315104", "0.46263182", "0.4620582", "0.4619797", "0.4613351", "0.46117777", "0.46112534", "0.46027866", "0.46005365", "0.45911548", "0.4588832", "0.45872447", "0.45864078", "0.45835546", "0.4580071", "0.45783848", "0.45772827", "0.4566983", "0.456342", "0.45577243", "0.4550475", "0.45498946", "0.4548536", "0.4547736", "0.45471382", "0.4546539", "0.4544512", "0.45374405", "0.45261577", "0.45188352", "0.45187342", "0.45161477", "0.4515057", "0.4514318", "0.45142004", "0.45113945", "0.45092", "0.45063365", "0.44983268", "0.4492198", "0.4491996", "0.44914007", "0.44860035", "0.4481134", "0.44790444", "0.44756478", "0.44715762", "0.4470821", "0.4470128", "0.446876", "0.44686174", "0.44683963" ]
0.0
-1
This operation is applicable only to replica set instances; it is not supported for standalone instances or sharded cluster instances. > If you have applied for a public endpoint for the instance, you must first call the [ReleasePublicNetworkAddress](~~67604~~) operation to release the public endpoint.
def migrate_to_other_zone( self, request: dds_20151201_models.MigrateToOtherZoneRequest, ) -> dds_20151201_models.MigrateToOtherZoneResponse: runtime = util_models.RuntimeOptions() return self.migrate_to_other_zone_with_options(request, runtime)
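A minimal usage sketch for the document above, wiring the prerequisite noted in the query (releasing the public endpoint) to the zone-migration call. This is not part of the dataset row: the package import path, the existence of `release_public_network_address` / `ReleasePublicNetworkAddressRequest` on this client, and the `MigrateToOtherZoneRequest` field names (`dbinstance_id`, `zone_id`) are assumptions based on the SDK's naming pattern, and all credentials, endpoints, and IDs are placeholders.

# Hedged usage sketch, assuming the standard Alibaba Cloud Python v2 SDK layout
# for dds-20151201. Field names and the release_public_network_address call are
# assumptions; IDs, credentials, and the endpoint below are placeholders.
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_dds20151201.client import Client
from alibabacloud_tea_openapi import models as open_api_models


def migrate_replica_set_to_zone(client: Client, instance_id: str, target_zone_id: str):
    # Per the operation notes, release the instance's public endpoint first
    # (assumed request/method names following the SDK pattern).
    release_request = dds_20151201_models.ReleasePublicNetworkAddressRequest(
        dbinstance_id=instance_id,
    )
    client.release_public_network_address(release_request)

    # Then request migration of the replica set instance to the target zone
    # (zone_id / dbinstance_id are assumed field names).
    migrate_request = dds_20151201_models.MigrateToOtherZoneRequest(
        dbinstance_id=instance_id,
        zone_id=target_zone_id,
    )
    return client.migrate_to_other_zone(migrate_request)


# Example call with placeholder credentials and IDs:
config = open_api_models.Config(
    access_key_id='<ACCESS_KEY_ID>',
    access_key_secret='<ACCESS_KEY_SECRET>',
    endpoint='mongodb.aliyuncs.com',
)
client = Client(config)
response = migrate_replica_set_to_zone(client, 'dds-bp1xxxxxxxx', 'cn-hangzhou-b')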
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def release_address(self, public_ip=None, allocation_id=None):\r\n params = {}\r\n\r\n if public_ip is not None:\r\n params['PublicIp'] = public_ip\r\n elif allocation_id is not None:\r\n params['AllocationId'] = allocation_id\r\n\r\n return self.get_status('ReleaseAddress', params, verb='POST')", "def release_eip_address(\n public_ip=None, allocation_id=None, region=None, key=None, keyid=None, profile=None\n):\n if not salt.utils.data.exactly_one((public_ip, allocation_id)):\n raise SaltInvocationError(\n \"Exactly one of 'public_ip' OR 'allocation_id' must be provided\"\n )\n\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n try:\n return conn.release_address(public_ip, allocation_id)\n except boto.exception.BotoServerError as e:\n log.error(e)\n return False", "def test_deploy_instance_with_new_network(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 252\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr)", "def disassociate_elastic_ip(ElasticIp=None):\n pass", "def release_node_private_network_address_with_options(\n self,\n request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.network_type):\n query['NetworkType'] = request.network_type\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ReleaseNodePrivateNetworkAddress',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse(),\n self.call_api(params, req, runtime)\n )", "def test_replace_host_subnet(self):\n pass", "def post_instance_ip_delete(self, resource_id, resource_dict):\n pass", "def remove_fixed_ip_from_instance(self, context, instance_id, address):\n args = {'instance_id': instance_id,\n 'address': address}\n rpc.cast(context, FLAGS.network_topic,\n {'method': 'remove_fixed_ip_from_instance',\n 'args': args})", "def release_node_private_network_address(\n self,\n request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,\n ) -> dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:\n runtime = util_models.RuntimeOptions()\n return self.release_node_private_network_address_with_options(request, runtime)", "async def release_node_private_network_address_with_options_async(\n 
self,\n request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.network_type):\n query['NetworkType'] = request.network_type\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ReleaseNodePrivateNetworkAddress',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def test_patch_host_subnet(self):\n pass", "def test_deploy_instance_with_networks_and_e2e_connection_using_public_ip(self):\n\n # skip test if suite couldn't start from an empty, clean list of allocated IPs (to avoid cascading failures)\n if self.suite_world['allocated_ips']:\n self.skipTest(\"There were pre-existing, not deallocated IPs\")\n\n # Allocate an IP\n allocated_ip = self.__allocate_ip_test_helper__()\n\n # Create Keypair\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n keypair_name = TEST_KEYPAIR_PREFIX + \"_\" + suffix\n private_keypair_value = self.__create_keypair_test_helper__(keypair_name)\n\n # Create Router with an external network gateway\n router_name = TEST_ROUTER_PREFIX + \"_e2e_\" + suffix\n external_network_id = self.__get_external_network_test_helper__()\n router_id = self.__create_router_test_helper__(router_name, external_network_id)\n\n # Create Network\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 246\n network_id, subnet_id = self.__create_network_and_subnet_test_helper__(network_name, network_cidr)\n\n # Add interface to router\n port_id = self.neutron_operations.add_interface_router(router_id, subnet_id)\n self.test_world['ports'].append(port_id)\n\n # Deploy VM (it will have only one IP from the Public Pool)\n instance_name = TEST_SERVER_PREFIX + \"_e2e_\" + suffix\n keypair_name = TEST_KEYPAIR_PREFIX + \"_\" + suffix\n sec_group_name = TEST_SEC_GROUP_PREFIX + \"_\" + suffix\n server_id = self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name, is_network_new=False,\n keypair_name=keypair_name, is_keypair_new=False,\n sec_group_name=sec_group_name)\n\n # Associate the public IP to Server\n self.nova_operations.add_floating_ip_to_instance(server_id=server_id, ip_address=allocated_ip)\n\n # SSH Connection\n self.__ssh_connection_test_helper__(host=allocated_ip, private_key=private_keypair_value)", "def test_delete_host_subnet(self):\n pass", "def 
test_deploy_instance_with_network_and_associate_public_ip(self):\n\n # skip test if suite couldn't start from an empty, clean list of allocated IPs (to avoid cascading failures)\n if self.suite_world['allocated_ips']:\n self.skipTest(\"There were pre-existing, not deallocated IPs\")\n\n # Allocate IP\n allocated_ip = self.__allocate_ip_test_helper__()\n\n # Create Router with an external network gateway\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n router_name = TEST_ROUTER_PREFIX + \"_public_ip_\" + suffix\n external_network_id = self.__get_external_network_test_helper__()\n router_id = self.__create_router_test_helper__(router_name, external_network_id)\n\n # Create Network\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 247\n network_id, subnet_id = self.__create_network_and_subnet_test_helper__(network_name, network_cidr)\n\n # Add interface to router\n port_id = self.neutron_operations.add_interface_router(router_id, subnet_id)\n self.test_world['ports'].append(port_id)\n\n # Deploy VM (it will have only one IP from the Public Pool)\n instance_name = TEST_SERVER_PREFIX + \"_public_ip_\" + suffix\n server_id = self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name, is_network_new=False)\n\n # Associate Public IP to Server\n self.nova_operations.add_floating_ip_to_instance(server_id=server_id, ip_address=allocated_ip)", "def test_delete_collection_host_subnet(self):\n pass", "def release_port_fixed_ip(self, network_id, device_id, subnet_id):\n return self.call(self.context,\n self.make_msg('release_port_fixed_ip',\n network_id=network_id,\n subnet_id=subnet_id,\n device_id=device_id,\n host=self.host),\n topic=self.topic)", "def test_deploy_instance_with_new_network_and_keypair(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_keypair_\" + suffix\n keypair_name = TEST_KEYPAIR_PREFIX + \"_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 250\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr,\n keypair_name=keypair_name)", "def deregister_elastic_ip(ElasticIp=None):\n pass", "def post_network_ipam_delete(self, resource_id, resource_dict):\n pass", "def subnet_update(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.update_subnet(**kwargs)", "def disassociate(self, endpoint_name=None, instance_id=None):\n if instance_id is None:\n raise Exception(\"Instance required!\")\n if endpoint_name is None:\n self.request('/v1.1/endpoint/instances/%s' % instance_id, 'DELETE')\n else:\n self.request('/v1.1/endpoints/%s/instances/%s' %\n (endpoint_name, instance_id), 'DELETE')", "def remove_gateway(self, network_ref):\n raise NotImplementedError()", "def post_instance_ip_create(self, resource_dict):\n pass", "def remove_ipv4_address(self, net_interface, address):\n self._runner.run('ip addr del %s dev %s' % (address, net_interface))", "def disassociate_resolver_endpoint_ip_address(ResolverEndpointId=None, IpAddress=None):\n pass", "def _post_task_update_advertise_address():\n default_network_interface = None\n\n with open(KUBE_APISERVER_CONFIG) as f:\n lines = f.read()\n m = re.search(REGEXPR_ADVERTISE_ADDRESS, lines)\n if m:\n default_network_interface = m.group(1)\n LOG.debug(' default_network_interface = %s', default_network_interface)\n\n if advertise_address and 
default_network_interface \\\n and advertise_address != default_network_interface:\n cmd = [\"sed\", \"-i\", \"/oidc-issuer-url/! s/{}/{}/g\".format(default_network_interface, advertise_address),\n KUBE_APISERVER_CONFIG]\n _ = _exec_cmd(cmd)", "def test_create_host_subnet(self):\n pass", "def change_address(\n vm_hostname, new_address,\n offline=False, migrate=False, allow_reserved_hv=False,\n offline_transport='drbd',\n):\n\n if not offline:\n raise IGVMError('IP address change can be only performed offline')\n\n with _get_vm(vm_hostname) as vm:\n if vm.dataset_obj['datacenter_type'] != 'kvm.dct':\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n new_address = ip_address(new_address)\n\n if vm.dataset_obj['intern_ip'] == new_address:\n raise ConfigError('New IP address is the same as the old one!')\n\n if not vm.hypervisor.get_vlan_network(new_address) and not migrate:\n err = 'Current hypervisor does not support new subnet!'\n raise ConfigError(err)\n\n new_network = Query(\n {\n 'servertype': 'route_network',\n 'state': 'online',\n 'network_type': 'internal',\n 'intern_ip': Contains(new_address),\n }\n ).get()['hostname']\n\n vm_was_running = vm.is_running()\n\n with Transaction() as transaction:\n if vm_was_running:\n vm.shutdown(\n transaction=transaction,\n check_vm_up_on_transaction=False,\n )\n vm.change_address(\n new_address, new_network, transaction=transaction,\n )\n\n if migrate:\n vm_migrate(\n vm_object=vm,\n run_puppet=True,\n offline=True,\n no_shutdown=True,\n allow_reserved_hv=allow_reserved_hv,\n offline_transport=offline_transport,\n )\n else:\n vm.hypervisor.mount_vm_storage(vm, transaction=transaction)\n vm.run_puppet()\n vm.hypervisor.redefine_vm(vm)\n vm.hypervisor.umount_vm_storage(vm)\n\n if vm_was_running:\n vm.start()", "def network_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_network(**kwargs)", "def test_07_associate_public_ip(self):\n # Validate the following\n # 1. Create a project\n # 2. Add some public Ips to the project\n # 3. 
Verify public IP assigned can only used to create PF/LB rules\n # inside project\n\n networks = Network.list(\n self.apiclient,\n projectid=self.project.id,\n listall=True\n )\n self.assertEqual(\n isinstance(networks, list),\n True,\n \"Check list networks response returns a valid response\"\n )\n self.assertNotEqual(\n len(networks),\n 0,\n \"Check list networks response returns a valid network\"\n )\n network = networks[0]\n self.debug(\"Associating public IP for project: %s\" % self.project.id)\n public_ip = PublicIPAddress.create(\n self.apiclient,\n zoneid=self.virtual_machine.zoneid,\n services=self.services[\"server\"],\n networkid=network.id,\n projectid=self.project.id\n )\n self.cleanup.append(public_ip)\n\n #Create NAT rule\n self.debug(\n \"Creating a NAT rule within project, VM ID: %s\" %\n self.virtual_machine.id)\n nat_rule = NATRule.create(\n self.apiclient,\n self.virtual_machine,\n self.services[\"natrule\"],\n public_ip.ipaddress.id,\n projectid=self.project.id\n )\n self.debug(\"created a NAT rule with ID: %s\" % nat_rule.id)\n nat_rule_response = NATRule.list(\n self.apiclient,\n id=nat_rule.id\n )\n self.assertEqual(\n isinstance(nat_rule_response, list),\n True,\n \"Check list response returns a valid list\"\n )\n self.assertNotEqual(\n len(nat_rule_response),\n 0,\n \"Check Port Forwarding Rule is created\"\n )\n self.assertEqual(\n nat_rule_response[0].id,\n nat_rule.id,\n \"Check Correct Port forwarding Rule is returned\"\n )\n\n #Create Load Balancer rule and assign VMs to rule\n self.debug(\"Created LB rule for public IP: %s\" %\n public_ip.ipaddress)\n lb_rule = LoadBalancerRule.create(\n self.apiclient,\n self.services[\"lbrule\"],\n public_ip.ipaddress.id,\n projectid=self.project.id\n )\n self.debug(\"Assigning VM: %s to LB rule: %s\" % (\n self.virtual_machine.name,\n lb_rule.id\n ))\n lb_rule.assign(self.apiclient, [self.virtual_machine])\n\n lb_rules = list_lb_rules(\n self.apiclient,\n id=lb_rule.id\n )\n self.assertEqual(\n isinstance(lb_rules, list),\n True,\n \"Check list response returns a valid list\"\n )\n #verify listLoadBalancerRules lists the added load balancing rule\n self.assertNotEqual(\n len(lb_rules),\n 0,\n \"Check Load Balancer Rule in its List\"\n )\n self.assertEqual(\n lb_rules[0].id,\n lb_rule.id,\n \"Check List Load Balancer Rules returns valid Rule\"\n )\n\n #Create Firewall rule with configurations from settings file\n fw_rule = FireWallRule.create(\n self.apiclient,\n ipaddressid=public_ip.ipaddress.id,\n protocol='TCP',\n cidrlist=[self.services[\"fw_rule\"][\"cidr\"]],\n startport=self.services[\"fw_rule\"][\"startport\"],\n endport=self.services[\"fw_rule\"][\"endport\"],\n projectid=self.project.id\n )\n self.debug(\"Created firewall rule: %s\" % fw_rule.id)\n\n # After Router start, FW rule should be in Active state\n fw_rules = FireWallRule.list(\n self.apiclient,\n id=fw_rule.id,\n )\n self.assertEqual(\n isinstance(fw_rules, list),\n True,\n \"Check for list FW rules response return valid data\"\n )\n\n self.assertEqual(\n fw_rules[0].state,\n 'Active',\n \"Check list load balancing rules\"\n )\n self.assertEqual(\n fw_rules[0].startport,\n self.services[\"fw_rule\"][\"startport\"],\n \"Check start port of firewall rule\"\n )\n\n self.assertEqual(\n fw_rules[0].endport,\n self.services[\"fw_rule\"][\"endport\"],\n \"Check end port of firewall rule\"\n )\n\n self.debug(\"Deploying VM for account: %s\" % self.account.name)\n virtual_machine_1 = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n 
templateid=self.template.id,\n accountid=self.account.name,\n domainid=self.account.domainid,\n serviceofferingid=self.service_offering.id,\n )\n self.cleanup.append(virtual_machine_1)\n\n self.debug(\"VM state after deploy: %s\" % virtual_machine_1.state)\n # Verify VM state\n self.assertEqual(\n virtual_machine_1.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n self.debug(\"Creating NAT rule for VM (ID: %s) outside project\" %\n virtual_machine_1.id)\n with self.assertRaises(Exception):\n NATRule.create(\n self.apiclient,\n virtual_machine_1,\n self.services[\"natrule\"],\n public_ip.ipaddress.id,\n )\n\n self.debug(\"Creating LB rule for public IP: %s outside project\" %\n public_ip.ipaddress)\n with self.assertRaises(Exception):\n LoadBalancerRule.create(\n self.apiclient,\n self.services[\"lbrule\"],\n public_ip.ipaddress.id,\n accountid=self.account.name\n )\n return", "def subnet(template, name, vpc, availability_zone='eu-west-1a', cidr='10.0.36.0/24', gateway=None, nat=None,\n map_public_ip=False, acl_table=None):\n s = Subnet(name, template=template)\n s.Tags = Tags(Name=aws_name(s.title))\n s.VpcId = Ref(vpc)\n s.CidrBlock = cidr\n s.MapPublicIpOnLaunch = map_public_ip\n\n if availability_zone:\n s.AvailabilityZone = Ref(availability_zone)\n\n if gateway and nat:\n raise(RuntimeError(\"Don't provide an internet gateway (public) and nat gateway (private) at the same time.\"))\n\n # add public route if an internet gateway is given\n if gateway:\n # route table\n rt = RouteTable('{}RouteTable'.format(name), template=template)\n rt.Tags = Tags(Name=aws_name(rt.title))\n rt.VpcId = Ref(vpc)\n\n # route\n r = Route('{}Route'.format(name), template=template)\n r.DestinationCidrBlock = '0.0.0.0/0'\n r.GatewayId = Ref(gateway)\n # r.DependsOn = InternetGatewayAttachment.title\n r.RouteTableId = Ref(rt)\n\n # associate\n SubnetRouteTableAssociation('{}SubnetRouteTableAssociation'.format(name), template=template,\n RouteTableId=Ref(rt), SubnetId=Ref(s))\n\n # add nat route if an nat gateway is given\n if nat:\n # route table\n rt = RouteTable('{}RouteTable'.format(name), template=template)\n rt.Tags = Tags(Name=aws_name(rt.title))\n rt.VpcId = Ref(vpc)\n\n # route\n r = Route('{}Route'.format(name), template=template)\n r.DestinationCidrBlock = '0.0.0.0/0'\n r.NatGatewayId = Ref(nat)\n # r.DependsOn = InternetGatewayAttachment.title\n r.RouteTableId = Ref(rt)\n\n # associate\n SubnetRouteTableAssociation('{}SubnetRouteTableAssociation'.format(name), template=template,\n RouteTableId=Ref(rt), SubnetId=Ref(s))\n\n # add acl table if one is provided. 
Defaults to vpc default acl if None is provided\n if acl_table:\n at = SubnetNetworkAclAssociation('{}SubnetAclTableAssociation'.format(name), template=template)\n at.SubnetId = Ref(s)\n at.NetworkAclId = Ref(acl_table)\n\n return s", "def create_network(client, overwrite_net=False, network_name=DOCK_NETWORK_NAME, subnetwork=DOCK_NETWORK_SUBNET,\n gw=DOCK_NETWORK_GW):\n\n if overwrite_net:\n try:\n client.networks.get(network_name).remove()\n logging.info(\" Overwriting existing network\")\n except docker.errors.APIError:\n logging.info(\" Warning: Couldn't find network to overwrite (does it exist?)\")\n\n ipam_pool = docker.types.IPAMPool(subnet=subnetwork, gateway=gw)\n ipam_config = docker.types.IPAMConfig(pool_configs=[ipam_pool])\n client.networks.create(network_name, driver=\"bridge\", ipam=ipam_config)", "def post_delete_subnet(self, sender, instance, **kwargs):\n RecurseNetworks.delete_entries(subnet=str(instance.ip_network), net_name=instance.name)", "def test_deploy_instance_with_new_network_and_sec_group(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_sec_group_\" + suffix\n sec_group_name = TEST_SEC_GROUP_PREFIX + \"_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 249\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr,\n sec_group_name=sec_group_name)", "def remove_ip(enode, portlbl, addr, shell=None):\n assert portlbl\n assert ip_interface(addr)\n port = enode.ports[portlbl]\n\n cmd = 'ip addr del {addr} dev {port}'.format(addr=addr, port=port)\n response = enode(cmd, shell=shell)\n assert not response", "def reset_network(self, instance):\n LOG.debug(\"reset_network\")\n return", "def post_instance_ip_update(self, resource_id, resource_dict):\n pass", "def post_virtual_network_delete(self, resource_id, resource_dict):\n pass", "def migrate_contract(network):\n print(network)", "def update_network(**kwargs):\n\n ip_addr = kwargs.get('ip_addr')\n is_private = kwargs.get('is_private')\n name = kwargs.get('name')\n dns_names = kwargs.get('dns_names')\n is_scanning = kwargs.get('is_scanning', False)\n network_id = make_shortuuid(name)\n\n network = {\n 'dns_names': dns_names,\n 'ip_addr': ip_addr,\n 'is_private' : is_private,\n 'name': name,\n 'id': network_id,\n 'is_scanning': is_scanning,\n 'updated_count': 0\n\n }\n\n network_exists = r.table(\"networks\").insert([network], conflict=\"update\")\n\n return network_exists.run(conn)", "def detach_public_ip(self, name=None, ip=None):\n raise NotImplementedError", "def sgup(sg=\"sg_external_ssh\"):\n ip = os.popen(\"/usr/bin/curl ifconfig.co 2>/dev/null\").readline().strip()\n print(\"My Public IP is : \"+ip)\n client = boto3.client(\"ec2\")\n ippermissions = client.describe_security_groups(GroupNames = [ sg ])[\"SecurityGroups\"][0][\"IpPermissions\"]\n print(\"Revoking old IP from group \"+sg)\n client.revoke_security_group_ingress(GroupName = sg, IpPermissions = ippermissions)\n printr(\"Adding new IP to group \"+sg)\n client.authorize_security_group_ingress(GroupName=sg, IpProtocol=\"-1\", FromPort=0, ToPort=0, CidrIp=ip+\"/32\")", "def delete_public_ip(self, ip=None):\n raise NotImplementedError", "def post_virtual_ip_delete(self, resource_id, resource_dict):\n pass", "def add_fixed_ip_to_instance(self, context, instance_id, host, network_id):\n args = {'instance_id': instance_id,\n 'host': host,\n 'network_id': network_id}\n rpc.cast(context, 
FLAGS.network_topic,\n {'method': 'add_fixed_ip_to_instance',\n 'args': args})", "def post_subnet_delete(self, resource_id, resource_dict):\n pass", "def subnet_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_subnet(**kwargs)", "def deallocate_for_instance(self, context, instance, **kwargs):\n args = kwargs\n args['instance_id'] = instance['id']\n args['project_id'] = instance['project_id']\n rpc.cast(context, FLAGS.network_topic,\n {'method': 'deallocate_for_instance',\n 'args': args})", "def test_vpc_to_subnets(neo4j_session):\n _ensure_local_neo4j_has_test_vpc_data(neo4j_session)\n _ensure_local_neo4j_has_test_subnet_data(neo4j_session)\n query = \"\"\"\n MATCH(vpc:GCPVpc{id:$VpcId})-[:RESOURCE]->(subnet:GCPSubnet)\n RETURN vpc.id, subnet.id, subnet.region, subnet.gateway_address, subnet.ip_cidr_range,\n subnet.private_ip_google_access\n \"\"\"\n expected_vpc_id = 'projects/project-abc/global/networks/default'\n nodes = neo4j_session.run(\n query,\n VpcId=expected_vpc_id,\n )\n actual_nodes = {\n (\n n['vpc.id'],\n n['subnet.id'],\n n['subnet.region'],\n n['subnet.gateway_address'],\n n['subnet.ip_cidr_range'],\n n['subnet.private_ip_google_access'],\n ) for n in nodes\n }\n\n expected_nodes = {\n (\n 'projects/project-abc/global/networks/default',\n 'projects/project-abc/regions/europe-west2/subnetworks/default',\n 'europe-west2',\n '10.0.0.1',\n '10.0.0.0/20',\n False,\n ),\n }\n assert actual_nodes == expected_nodes", "def test_nic_to_subnets(neo4j_session):\n _ensure_local_neo4j_has_test_subnet_data(neo4j_session)\n _ensure_local_neo4j_has_test_instance_data(neo4j_session)\n subnet_query = \"\"\"\n MATCH (nic:GCPNetworkInterface{id:$NicId})-[:PART_OF_SUBNET]->(subnet:GCPSubnet)\n return nic.nic_id, nic.private_ip, subnet.id, subnet.gateway_address, subnet.ip_cidr_range\n \"\"\"\n nodes = neo4j_session.run(\n subnet_query,\n NicId='projects/project-abc/zones/europe-west2-b/instances/instance-1-test/networkinterfaces/nic0',\n )\n actual_nodes = {\n (\n n['nic.nic_id'],\n n['nic.private_ip'],\n n['subnet.id'],\n n['subnet.gateway_address'],\n n['subnet.ip_cidr_range'],\n ) for n in nodes\n }\n expected_nodes = {(\n 'projects/project-abc/zones/europe-west2-b/instances/instance-1-test/networkinterfaces/nic0',\n '10.0.0.3',\n 'projects/project-abc/regions/europe-west2/subnetworks/default',\n '10.0.0.1',\n '10.0.0.0/20',\n )}\n assert actual_nodes == expected_nodes", "def test_deploy_instance_with_new_network_and_metadata(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_metadata_\" + suffix\n instance_meta = {\"test_item\": \"test_value\"}\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 251\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr,\n metadata=instance_meta)", "def test_patch_namespaced_egress_network_policy(self):\n pass", "def amazon_public_address() -> str:\n check_url = 'http://169.254.169.254/latest/meta-data/public-ipv4'\n check_timeout = float(CONFIG['network']['check_timeout'])\n try:\n with urllib.request.urlopen(\n url=check_url, timeout=check_timeout,\n ) as response:\n return response.read().decode().strip()\n except Exception as error:\n return None", "async def release_node_private_network_address_async(\n self,\n request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,\n ) -> 
dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:\n runtime = util_models.RuntimeOptions()\n return await self.release_node_private_network_address_with_options_async(request, runtime)", "def pre_instance_ip_delete(self, resource_id):\n pass", "def create_network(\n self, is_internal: bool = True\n ) -> None:\n if self.network:\n self.log.warn(f\"Network {self.network_name} was already created!\")\n return\n\n existing_networks = self.docker.networks.list(\n names=[self.network_name]\n )\n if existing_networks:\n if len(existing_networks) > 1:\n self.log.error(\n f\"Found multiple ({len(existing_networks)}) existing \"\n f\"networks {self.network_name}. Please delete all or all \"\n \"but one before starting the server!\")\n exit(1)\n self.log.info(f\"Network {self.network_name} already exists! Using \"\n \"existing network\")\n self.network = existing_networks[0]\n self.network.reload() # required to initialize containers in netw\n else:\n self.network = self.docker.networks.create(\n self.network_name,\n driver=\"bridge\",\n internal=is_internal,\n scope=\"local\",\n )", "def disassociate_address(self, public_ip=None, association_id=None):\r\n params = {}\r\n\r\n if public_ip is not None:\r\n params['PublicIp'] = public_ip\r\n elif association_id is not None:\r\n params['AssociationId'] = association_id\r\n\r\n return self.get_status('DisassociateAddress', params, verb='POST')", "def create_network(region_name, vpc_cidr, tag_prefix,\n tls_priv_key=None, tls_fullchain_cert=None,\n ssh_key_name=None, ssh_key_content=None, sally_ip=None,\n s3_logs_bucket=None, s3_identities_bucket=None,\n storage_enckey=None,\n dry_run=False):\n sg_tag_prefix = tag_prefix\n\n LOGGER.info(\"Provisions network ...\")\n ec2_client = boto3.client('ec2', region_name=region_name)\n\n # Create a VPC\n vpc_id, vpc_cidr_read = _get_vpc_id(tag_prefix, ec2_client=ec2_client,\n region_name=region_name)\n if vpc_id:\n if vpc_cidr != vpc_cidr_read:\n raise RuntimeError(\n \"%s cidr block for VPC is %s while it was expected to be %s\" %\n (tag_prefix, vpc_cidr_read, vpc_cidr))\n else:\n if not vpc_cidr:\n raise RuntimeError(\n \"%s could not find VPC and no cidr block is specified\"\\\n \" to create one.\" % tag_prefix)\n resp = ec2_client.create_vpc(\n DryRun=dry_run,\n CidrBlock=vpc_cidr,\n AmazonProvidedIpv6CidrBlock=False,\n InstanceTenancy='default')\n vpc_id = resp['Vpc']['VpcId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[vpc_id],\n Tags=[\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\", 'Value': \"%s-vpc\" % tag_prefix}])\n LOGGER.info(\"%s created VPC %s\", tag_prefix, vpc_id)\n\n # Create subnets for app, dbs and web services\n # ELB will require that there is at least one subnet per availability zones.\n # RDS will require that there is at least two subnets for databases.\n resp = ec2_client.describe_availability_zones()\n zones = {(zone['ZoneId'], zone['ZoneName'])\n for zone in resp['AvailabilityZones']}\n web_subnet_cidrs, dbs_subnet_cidrs, app_subnet_cidrs = _split_cidrs(\n vpc_cidr, zones=zones, region_name=region_name)\n\n LOGGER.info(\"%s provisioning web subnets...\", tag_prefix)\n web_zones = set([])\n web_subnet_by_cidrs = _get_subnet_by_cidrs(\n web_subnet_cidrs, tag_prefix, vpc_id=vpc_id, ec2_client=ec2_client)\n for cidr_block, subnet in web_subnet_by_cidrs.items():\n if subnet:\n web_zones |= {\n (subnet['AvailabilityZoneId'], subnet['AvailabilityZone'])}\n for cidr_block, subnet in web_subnet_by_cidrs.items():\n if not subnet:\n available_zones = zones - 
web_zones\n zone_id, zone_name = available_zones.pop()\n try:\n resp = ec2_client.create_subnet(\n AvailabilityZoneId=zone_id,\n CidrBlock=cidr_block,\n VpcId=vpc_id,\n TagSpecifications=[{\n 'ResourceType': 'subnet',\n 'Tags': [\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s %s web\" % (tag_prefix, zone_name)}]}],\n DryRun=dry_run)\n subnet = resp['Subnet']\n web_subnet_by_cidrs[cidr_block] = subnet\n web_zones |= set([(zone_id, zone_name)])\n subnet_id = subnet['SubnetId']\n LOGGER.info(\"%s created subnet %s in zone %s for cidr %s\",\n tag_prefix, subnet_id, zone_name, cidr_block)\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidSubnet.Conflict':\n raise\n # We have a conflict, let's just skip over it.\n LOGGER.warning(\n \"%s (skip) created subnet in zone %s because '%s'\",\n tag_prefix, zone_name, err)\n if subnet and not subnet['MapPublicIpOnLaunch']:\n subnet_id = subnet['SubnetId']\n if not dry_run:\n resp = ec2_client.modify_subnet_attribute(\n SubnetId=subnet_id,\n MapPublicIpOnLaunch={'Value': True})\n LOGGER.info(\"%s modify web subnet %s so instance can receive\"\\\n \" a public IP by default\", tag_prefix, subnet_id)\n\n LOGGER.info(\"%s provisioning dbs subnets...\", tag_prefix)\n dbs_zones = set([])\n dbs_subnet_by_cidrs = _get_subnet_by_cidrs(\n dbs_subnet_cidrs, tag_prefix, vpc_id=vpc_id, ec2_client=ec2_client)\n for cidr_block, subnet in dbs_subnet_by_cidrs.items():\n if subnet:\n dbs_zones |= {\n (subnet['AvailabilityZoneId'], subnet['AvailabilityZone'])}\n for cidr_block, subnet in dbs_subnet_by_cidrs.items():\n if not subnet:\n available_zones = zones - dbs_zones\n zone_id, zone_name = available_zones.pop()\n resp = ec2_client.create_subnet(\n AvailabilityZoneId=zone_id,\n CidrBlock=cidr_block,\n VpcId=vpc_id,\n TagSpecifications=[{\n 'ResourceType': 'subnet',\n 'Tags': [\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s %s dbs\" % (tag_prefix, zone_name)}]}],\n DryRun=dry_run)\n subnet = resp['Subnet']\n dbs_subnet_by_cidrs[cidr_block] = subnet\n dbs_zones |= set([(zone_id, zone_name)])\n subnet_id = subnet['SubnetId']\n LOGGER.info(\"%s created subnet %s in zone %s for cidr %s\",\n tag_prefix, subnet_id, zone_name, cidr_block)\n if subnet['MapPublicIpOnLaunch']:\n subnet_id = subnet['SubnetId']\n if not dry_run:\n resp = ec2_client.modify_subnet_attribute(\n SubnetId=subnet_id,\n MapPublicIpOnLaunch={'Value': False})\n LOGGER.info(\"%s modify dbs subnet %s so instance do not receive\"\\\n \" a public IP by default\", tag_prefix, subnet_id)\n\n LOGGER.info(\"%s provisioning apps subnets...\", tag_prefix)\n app_zones = set([])\n app_subnet_by_cidrs = _get_subnet_by_cidrs(\n app_subnet_cidrs, tag_prefix, vpc_id=vpc_id, ec2_client=ec2_client)\n for cidr_block, subnet in app_subnet_by_cidrs.items():\n if subnet:\n app_zones |= {\n (subnet['AvailabilityZoneId'], subnet['AvailabilityZone'])}\n for cidr_block, subnet in app_subnet_by_cidrs.items():\n if not subnet:\n available_zones = zones - app_zones\n zone_id, zone_name = available_zones.pop()\n resp = ec2_client.create_subnet(\n AvailabilityZoneId=zone_id,\n CidrBlock=cidr_block,\n VpcId=vpc_id,\n # COMMIT MSG:\n # this requires boto3>=1.14, using `createTag` might fail\n # because the subnet is not fully created yet.\n TagSpecifications=[{\n 'ResourceType': 'subnet',\n 'Tags': [\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s %s app\" % (tag_prefix, 
zone_name)}]}],\n DryRun=dry_run)\n subnet = resp['Subnet']\n app_subnet_by_cidrs[cidr_block] = subnet\n app_zones |= set([(zone_id, zone_name)])\n subnet_id = subnet['SubnetId']\n LOGGER.info(\"%s created subnet %s in %s for cidr %s\",\n tag_prefix, subnet_id, zone_name, cidr_block)\n if subnet['MapPublicIpOnLaunch']:\n subnet_id = subnet['SubnetId']\n if not dry_run:\n resp = ec2_client.modify_subnet_attribute(\n SubnetId=subnet_id,\n MapPublicIpOnLaunch={'Value': False})\n LOGGER.info(\"%s modify app subnet %s so instance do not receive\"\\\n \" a public IP by default\", tag_prefix, subnet_id)\n\n # Ensure that the VPC has an Internet Gateway.\n resp = ec2_client.describe_internet_gateways(\n Filters=[{'Name': 'attachment.vpc-id', 'Values': [vpc_id]}])\n if resp['InternetGateways']:\n igw_id = resp['InternetGateways'][0]['InternetGatewayId']\n LOGGER.info(\"%s found Internet Gateway %s\", tag_prefix, igw_id)\n else:\n resp = ec2_client.describe_internet_gateways(\n Filters=[{'Name': 'tag:Prefix', 'Values': [tag_prefix]}])\n if resp['InternetGateways']:\n igw_id = resp['InternetGateways'][0]['InternetGatewayId']\n LOGGER.info(\"%s found Internet Gateway %s\", tag_prefix, igw_id)\n else:\n resp = ec2_client.create_internet_gateway(DryRun=dry_run)\n igw_id = resp['InternetGateway']['InternetGatewayId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[igw_id],\n Tags=[{'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s internet gateway\" % tag_prefix}])\n LOGGER.info(\"%s created Internet Gateway %s\", tag_prefix, igw_id)\n resp = ec2_client.attach_internet_gateway(\n DryRun=dry_run,\n InternetGatewayId=igw_id,\n VpcId=vpc_id)\n\n # Create the NAT gateway by which private subnets connect to Internet\n # XXX Why do we have a Network interface eni-****?\n nat_elastic_ip = None\n web_elastic_ip = None\n resp = ec2_client.describe_addresses(\n Filters=[{'Name': 'tag:Prefix', 'Values': [tag_prefix]}])\n if resp['Addresses']:\n for resp_address in resp['Addresses']:\n for resp_tag in resp_address['Tags']:\n if resp_tag['Key'] == 'Name':\n if 'NAT gateway' in resp_tag['Value']:\n nat_elastic_ip = resp_address['AllocationId']\n break\n if 'Sally' in resp_tag['Value']:\n web_elastic_ip = resp_address['AllocationId']\n break\n\n if nat_elastic_ip:\n LOGGER.info(\"%s found NAT gateway public IP %s\",\n tag_prefix, nat_elastic_ip)\n else:\n resp = ec2_client.allocate_address(\n DryRun=dry_run,\n Domain='vpc')\n nat_elastic_ip = resp['AllocationId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[nat_elastic_ip],\n Tags=[{'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s NAT gateway public IP\" % tag_prefix}])\n LOGGER.info(\"%s created NAT gateway public IP %s\",\n tag_prefix, nat_elastic_ip)\n if web_elastic_ip:\n LOGGER.info(\"%s found Sally public IP %s\",\n tag_prefix, web_elastic_ip)\n else:\n resp = ec2_client.allocate_address(\n DryRun=dry_run,\n Domain='vpc')\n web_elastic_ip = resp['AllocationId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[web_elastic_ip],\n Tags=[{'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s Sally public IP\" % tag_prefix}])\n LOGGER.info(\"%s created Sally public IP %s\",\n tag_prefix, web_elastic_ip)\n\n # We have 2 EIP addresses. 
They need to be connected to machines\n # running in an Internet facing subnet.\n client_token = tag_prefix\n # XXX shouldn't it be the first web subnet instead?\n resp = ec2_client.describe_nat_gateways(Filters=[\n {'Name': \"vpc-id\", 'Values': [vpc_id]},\n {'Name': \"state\", 'Values': ['pending', 'available']}])\n if resp['NatGateways']:\n if len(resp['NatGateways']) > 1:\n LOGGER.warning(\"%s found more than one NAT gateway.\"\\\n \" Using first one in the list.\", tag_prefix)\n nat_gateway = resp['NatGateways'][0]\n nat_gateway_id = nat_gateway['NatGatewayId']\n nat_gateway_subnet_id = nat_gateway['SubnetId']\n LOGGER.info(\"%s found NAT gateway %s\", tag_prefix, nat_gateway_id)\n else:\n nat_gateway_subnet_id = next(web_subnet_by_cidrs.values())['SubnetId']\n resp = ec2_client.create_nat_gateway(\n AllocationId=nat_elastic_ip,\n ClientToken=client_token,\n SubnetId=nat_gateway_subnet_id)\n nat_gateway_id = resp['NatGateway']['NatGatewayId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[nat_gateway_id],\n Tags=[{'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s NAT gateway\" % tag_prefix}])\n LOGGER.info(\"%s created NAT gateway %s\",\n tag_prefix, nat_gateway_id)\n\n # Set up public and NAT-protected route tables\n resp = ec2_client.describe_route_tables(\n Filters=[{'Name': \"vpc-id\", 'Values': [vpc_id]}])\n public_route_table_id = None\n private_route_table_id = None\n for route_table in resp['RouteTables']:\n for route in route_table['Routes']:\n if 'GatewayId' in route and route['GatewayId'] == igw_id:\n public_route_table_id = route_table['RouteTableId']\n LOGGER.info(\"%s found public route table %s\",\n tag_prefix, public_route_table_id)\n break\n if ('NatGatewayId' in route and\n route['NatGatewayId'] == nat_gateway_id):\n private_route_table_id = route_table['RouteTableId']\n LOGGER.info(\"%s found private route table %s\",\n tag_prefix, private_route_table_id)\n\n if not public_route_table_id:\n resp = ec2_client.create_route_table(\n DryRun=dry_run,\n VpcId=vpc_id)\n public_route_table_id = resp['RouteTable']['RouteTableId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[public_route_table_id],\n Tags=[\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\", 'Value': \"%s public\" % tag_prefix}])\n LOGGER.info(\"%s created public subnet route table %s\",\n tag_prefix, public_route_table_id)\n resp = ec2_client.create_route(\n DryRun=dry_run,\n DestinationCidrBlock='0.0.0.0/0',\n GatewayId=igw_id,\n RouteTableId=public_route_table_id)\n\n if not private_route_table_id:\n resp = ec2_client.create_route_table(\n DryRun=dry_run,\n VpcId=vpc_id)\n private_route_table_id = resp['RouteTable']['RouteTableId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[private_route_table_id],\n Tags=[\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\", 'Value': \"%s internal\" % tag_prefix}])\n private_route_table_id = resp['RouteTable']['RouteTableId']\n LOGGER.info(\"%s created private route table %s\",\n tag_prefix, private_route_table_id)\n for _ in range(0, NB_RETRIES):\n # The NAT Gateway takes some time to be fully operational.\n try:\n resp = ec2_client.create_route(\n DryRun=dry_run,\n DestinationCidrBlock='0.0.0.0/0',\n NatGatewayId=nat_gateway_id,\n RouteTableId=private_route_table_id)\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidNatGatewayID.NotFound':\n raise\n time.sleep(RETRY_WAIT_DELAY)\n\n resp = 
ec2_client.describe_route_tables(\n DryRun=dry_run,\n RouteTableIds=[public_route_table_id])\n assocs = resp['RouteTables'][0]['Associations']\n if len(assocs) > 1:\n LOGGER.warning(\"%s found more than one route table association for\"\\\n \" public route table. Using first one in the list.\", tag_prefix)\n if not assocs[0]['Main']:\n LOGGER.warning(\"%s public route table is not the main one for the VPC.\",\n tag_prefix)\n\n for cidr_block, subnet in web_subnet_by_cidrs.items():\n if not subnet:\n # Maybe there was a conflict and we skipped this cidr_block.\n continue\n subnet_id = subnet['SubnetId']\n resp = ec2_client.describe_route_tables(\n DryRun=dry_run,\n Filters=[{\n 'Name': 'association.subnet-id',\n 'Values': [subnet_id]\n }])\n # The Main route table does not show as an explicit association.\n found_association = not bool(resp['RouteTables'])\n if found_association:\n LOGGER.info(\n \"%s found public route table %s associated to web subnet %s\",\n tag_prefix, public_route_table_id, subnet_id)\n else:\n resp = ec2_client.associate_route_table(\n DryRun=dry_run,\n RouteTableId=public_route_table_id,\n SubnetId=subnet_id)\n LOGGER.info(\n \"%s associate public route table %s to web subnet %s\",\n tag_prefix, public_route_table_id, subnet_id)\n\n for cidr_block, subnet in dbs_subnet_by_cidrs.items():\n subnet_id = subnet['SubnetId']\n resp = ec2_client.describe_route_tables(\n DryRun=dry_run,\n Filters=[{\n 'Name': 'association.subnet-id',\n 'Values': [subnet_id]\n }])\n # The Main route table does not show as an explicit association.\n found_association = False\n if resp['RouteTables']:\n found_association = (\n resp['RouteTables'][0]['Associations'][0]['RouteTableId'] ==\n private_route_table_id\n )\n if found_association:\n LOGGER.info(\n \"%s found private route table %s associated to dbs subnet %s\",\n tag_prefix, private_route_table_id, subnet_id)\n else:\n resp = ec2_client.associate_route_table(\n DryRun=dry_run,\n RouteTableId=private_route_table_id,\n SubnetId=subnet_id)\n LOGGER.info(\n \"%s associate private route table %s to dbs subnet %s\",\n tag_prefix, private_route_table_id, subnet_id)\n\n for cidr_block, subnet in app_subnet_by_cidrs.items():\n subnet_id = subnet['SubnetId']\n resp = ec2_client.describe_route_tables(\n DryRun=dry_run,\n Filters=[{\n 'Name': 'association.subnet-id',\n 'Values': [subnet_id]\n }])\n # The Main route table does not show as an explicit association.\n found_association = False\n if resp['RouteTables']:\n found_association = (\n resp['RouteTables'][0]['Associations'][0]['RouteTableId'] ==\n private_route_table_id\n )\n if found_association:\n LOGGER.info(\n \"%s found private route table %s associated to app subnet %s\",\n tag_prefix, private_route_table_id, subnet_id)\n else:\n resp = ec2_client.associate_route_table(\n DryRun=dry_run,\n RouteTableId=private_route_table_id,\n SubnetId=subnet_id)\n LOGGER.info(\n \"%s associate private route table %s to app subnet %s\",\n tag_prefix, private_route_table_id, subnet_id)\n\n # Create the ELB, proxies and databases security groups\n # The app security group (as the instance role) will be specific\n # to the application.\n #pylint:disable=unbalanced-tuple-unpacking\n moat_name, vault_name, gate_name, kitchen_door_name = \\\n _get_security_group_names([\n 'moat', 'vault', 'castle-gate', 'kitchen-door'],\n tag_prefix=sg_tag_prefix)\n moat_sg_id, vault_sg_id, gate_sg_id, kitchen_door_sg_id = \\\n _get_security_group_ids(\n [moat_name, vault_name, gate_name, kitchen_door_name],\n tag_prefix, 
vpc_id=vpc_id, ec2_client=ec2_client)\n\n update_moat_rules = (not moat_sg_id)\n update_gate_rules = (not gate_sg_id)\n update_vault_rules = (not vault_sg_id)\n update_kitchen_door_rules = (not kitchen_door_sg_id)\n\n if not moat_sg_id:\n resp = ec2_client.create_security_group(\n Description='%s ELB' % tag_prefix,\n GroupName=moat_name,\n VpcId=vpc_id,\n DryRun=dry_run)\n moat_sg_id = resp['GroupId']\n LOGGER.info(\"%s created %s security group %s\",\n tag_prefix, moat_name, moat_sg_id)\n if not gate_sg_id:\n resp = ec2_client.create_security_group(\n Description='%s session managers' % tag_prefix,\n GroupName=gate_name,\n VpcId=vpc_id,\n DryRun=dry_run)\n gate_sg_id = resp['GroupId']\n LOGGER.info(\"%s created %s security group %s\",\n tag_prefix, gate_name, gate_sg_id)\n if not vault_sg_id:\n resp = ec2_client.create_security_group(\n Description='%s databases' % tag_prefix,\n GroupName=vault_name,\n VpcId=vpc_id,\n DryRun=dry_run)\n vault_sg_id = resp['GroupId']\n LOGGER.info(\"%s created %s security group %s\",\n tag_prefix, vault_name, vault_sg_id)\n # kitchen_door_sg_id: Kitchen door security group is created later on\n # if we have ssh keys.\n\n resp = ec2_client.describe_security_groups(\n DryRun=dry_run,\n GroupIds=[moat_sg_id, vault_sg_id, gate_sg_id])\n for security_group in resp['SecurityGroups']:\n if security_group['GroupId'] == moat_sg_id:\n # moat rules\n LOGGER.info(\"%s check ingress rules for %s\", tag_prefix, moat_name)\n check_security_group_ingress(security_group, expected_rules=[\n {'port': 80, 'source': '0.0.0.0/0'},\n {'port': 80, 'source': '::/0'},\n {'port': 443, 'source': '0.0.0.0/0'},\n {'port': 443, 'source': '::/0'},\n ],\n tag_prefix=tag_prefix)\n elif security_group['GroupId'] == gate_sg_id:\n # castle-gate rules\n LOGGER.info(\"%s check ingress rules for %s\", tag_prefix, gate_name)\n check_security_group_ingress(security_group, expected_rules=[\n {'port': 80, 'source': moat_sg_id},\n {'port': 443, 'source': moat_sg_id}\n ],\n tag_prefix=tag_prefix)\n elif security_group['GroupId'] == vault_sg_id:\n # vault rules\n LOGGER.info(\"%s check ingress rules for %s\", tag_prefix, vault_name)\n check_security_group_ingress(security_group, expected_rules=[\n {'port': 5432, 'source': gate_sg_id}\n ],\n tag_prefix=tag_prefix)\n\n # moat allow rules\n if update_moat_rules:\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=moat_sg_id,\n IpPermissions=[{\n 'FromPort': 80,\n 'IpProtocol': 'tcp',\n 'IpRanges': [{\n 'CidrIp': '0.0.0.0/0'\n }],\n 'ToPort': 80\n }, {\n 'FromPort': 80,\n 'IpProtocol': 'tcp',\n 'Ipv6Ranges': [{\n 'CidrIpv6': '::/0',\n }],\n 'ToPort': 80\n }, {\n 'FromPort': 443,\n 'IpProtocol': 'tcp',\n 'IpRanges': [{\n 'CidrIp': '0.0.0.0/0'\n }],\n 'ToPort': 443\n }, {\n 'FromPort': 443,\n 'IpProtocol': 'tcp',\n 'Ipv6Ranges': [{\n 'CidrIpv6': '::/0',\n }],\n 'ToPort': 443\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n\n if update_gate_rules:\n # castle-gate allow rules\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=gate_sg_id,\n IpPermissions=[{\n 'IpProtocol': 'tcp',\n 'FromPort': 80,\n 'ToPort': 80,\n 'UserIdGroupPairs': [{'GroupId': moat_sg_id}]\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = 
ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=gate_sg_id,\n IpPermissions=[{\n 'IpProtocol': 'tcp',\n 'FromPort': 443,\n 'ToPort': 443,\n 'UserIdGroupPairs': [{'GroupId': moat_sg_id}]\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = ec2_client.authorize_security_group_egress(\n DryRun=dry_run,\n GroupId=gate_sg_id,\n IpPermissions=[{\n 'IpProtocol': '-1',\n 'IpRanges': [{\n 'CidrIp': '0.0.0.0/0',\n }]}])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n # vault allow rules\n if update_vault_rules:\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=vault_sg_id,\n IpPermissions=[{\n 'IpProtocol': 'tcp',\n 'FromPort': 5432,\n 'ToPort': 5432,\n 'UserIdGroupPairs': [{'GroupId': gate_sg_id}]\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = ec2_client.authorize_security_group_egress(\n DryRun=dry_run,\n GroupId=vault_sg_id,\n IpPermissions=[{\n 'IpProtocol': '-1',\n 'IpRanges': [{\n 'CidrIp': '0.0.0.0/0',\n }]}])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n\n # Create uploads and logs S3 buckets\n # XXX create the identities bucket?\n # XXX need to force private.\n if not s3_identities_bucket:\n s3_identities_bucket = '%s-identities' % tag_prefix\n s3_uploads_bucket = tag_prefix\n s3_client = boto3.client('s3')\n if s3_logs_bucket:\n try:\n resp = s3_client.create_bucket(\n ACL='private',\n Bucket=s3_logs_bucket,\n CreateBucketConfiguration={\n 'LocationConstraint': region_name\n })\n LOGGER.info(\"%s created S3 bucket for logs %s\",\n tag_prefix, s3_logs_bucket)\n except botocore.exceptions.ClientError as err:\n LOGGER.info(\"%s found S3 bucket for logs %s\",\n tag_prefix, s3_logs_bucket)\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'BucketAlreadyOwnedByYou':\n raise\n # Apply bucket encryption by default\n found_encryption = False\n try:\n resp = s3_client.get_bucket_encryption(\n Bucket=s3_logs_bucket)\n if resp['ServerSideEncryptionConfiguration']['Rules'][0][\n 'ApplyServerSideEncryptionByDefault'][\n 'SSEAlgorithm'] == 'AES256':\n found_encryption = True\n LOGGER.info(\"%s found encryption AES256 enabled on %s bucket\",\n tag_prefix, s3_logs_bucket)\n except botocore.exceptions.ClientError as err:\n LOGGER.info(\"%s found S3 bucket for logs %s\",\n tag_prefix, s3_logs_bucket)\n if not err.response.get('Error', {}).get('Code', 'Unknown') == \\\n 'ServerSideEncryptionConfigurationNotFoundError':\n raise\n if not found_encryption:\n s3_client.put_bucket_encryption(\n Bucket=s3_logs_bucket,\n ServerSideEncryptionConfiguration={\n 'Rules': [{\n 'ApplyServerSideEncryptionByDefault': {\n 'SSEAlgorithm': 'AES256',\n }\n }]\n })\n LOGGER.info(\"%s enable encryption on %s bucket\",\n tag_prefix, s3_logs_bucket)\n\n # Set versioning and lifecycle policies\n resp = s3_client.get_bucket_versioning(\n Bucket=s3_logs_bucket)\n if 'Status' in resp and resp['Status'] == 'Enabled':\n LOGGER.info(\"%s found versioning enabled on %s bucket\",\n tag_prefix, s3_logs_bucket)\n else:\n s3_client.put_bucket_versioning(\n Bucket=s3_logs_bucket,\n 
VersioningConfiguration={\n 'MFADelete': 'Disabled',\n 'Status': 'Enabled'\n })\n LOGGER.info(\"%s enable versioning on %s bucket\",\n tag_prefix, s3_logs_bucket)\n found_policy = False\n #pylint:disable=too-many-nested-blocks\n try:\n resp = s3_client.get_bucket_lifecycle_configuration(\n Bucket=s3_logs_bucket)\n for rule in resp['Rules']:\n if rule['Status'] == 'Enabled':\n found_rule = True\n for transition in rule['Transitions']:\n if transition['StorageClass'] == 'GLACIER':\n if transition.get('Days', 0) < 90:\n found_rule = False\n LOGGER.warning(\"%s lifecycle for 'GLACIER'\"\\\n \" is less than 90 days.\", tag_prefix)\n break\n if rule['Expiration'].get('Days', 0) < 365:\n found_rule = False\n LOGGER.warning(\n \"%s lifecycle expiration is less than 365 days.\",\n tag_prefix)\n for transition in rule['NoncurrentVersionTransitions']:\n if transition['StorageClass'] == 'GLACIER':\n if transition.get('NoncurrentDays', 0) < 90:\n found_rule = False\n LOGGER.warning(\n \"%s version lifecycle for 'GLACIER'\"\\\n \" is less than 90 days.\", tag_prefix)\n break\n if rule['NoncurrentVersionExpiration'].get(\n 'NoncurrentDays', 0) < 365:\n found_rule = False\n LOGGER.warning(\"%s lifecycle version expiration is\"\\\n \" less than 365 days.\", tag_prefix)\n if found_rule:\n found_policy = True\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'NoSuchLifecycleConfiguration':\n raise\n if found_policy:\n LOGGER.info(\"%s found lifecycle policy on %s bucket\",\n tag_prefix, s3_logs_bucket)\n else:\n s3_client.put_bucket_lifecycle_configuration(\n Bucket=s3_logs_bucket,\n LifecycleConfiguration={\n \"Rules\": [{\n \"Status\": \"Enabled\",\n \"ID\": \"expire-logs\",\n \"Filter\": {\n \"Prefix\": \"\", # This is required.\n },\n \"Transitions\": [{\n \"Days\": 90,\n \"StorageClass\": \"GLACIER\"\n }],\n \"Expiration\" : {\n \"Days\": 365\n },\n \"NoncurrentVersionTransitions\": [{\n \"NoncurrentDays\": 90,\n \"StorageClass\": \"GLACIER\"\n }],\n 'NoncurrentVersionExpiration': {\n 'NoncurrentDays': 365\n },\n }]})\n LOGGER.info(\"%s update lifecycle policy on %s bucket\",\n tag_prefix, s3_logs_bucket)\n\n # https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-access-logs.html#attach-bucket-policy\n elb_account_ids_per_region = {\n 'us-east-1': '127311923021',\n 'us-east-2': '033677994240',\n 'us-west-1': '027434742980',\n 'us-west-2': '797873946194',\n 'af-south-1': '098369216593',\n 'ca-central-1': '985666609251',\n 'eu-central-1': '054676820928',\n 'eu-west-1': '156460612806',\n 'eu-west-2': '652711504416',\n 'eu-south-1': '635631232127',\n 'eu-west-3': '009996457667',\n 'eu-north-1': '897822967062',\n 'ap-east-1': '754344448648',\n 'ap-northeast-1': '582318560864',\n 'ap-northeast-2': '600734575887',\n 'ap-northeast-3': '383597477331',\n 'ap-southeast-1': '114774131450',\n 'ap-southeast-2': '783225319266',\n 'ap-south-1': '718504428378',\n 'me-south-1': '076674570225',\n 'sa-east-1': '507241528517'\n }\n elb_account_id = elb_account_ids_per_region[region_name]\n s3_client.put_bucket_policy(\n Bucket=s3_logs_bucket,\n Policy=json.dumps({\n \"Version\": \"2008-10-17\",\n \"Id\": \"WriteLogs\",\n \"Statement\": [{\n # billing reports\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"billingreports.amazonaws.com\"\n },\n \"Action\": [\n \"s3:GetBucketAcl\",\n \"s3:GetBucketPolicy\"\n ],\n \"Resource\": \"arn:aws:s3:::%s\" % s3_logs_bucket\n }, {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": 
\"billingreports.amazonaws.com\"\n },\n \"Action\": \"s3:PutObject\",\n \"Resource\": \"arn:aws:s3:::%s/*\" % s3_logs_bucket\n }, {\n # ELB access logs\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"AWS\": \"arn:aws:iam::%s:root\" % elb_account_id\n },\n \"Action\": \"s3:PutObject\",\n \"Resource\":\n \"arn:aws:s3:::%s/var/log/elb/*\" % s3_logs_bucket\n }, {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"delivery.logs.amazonaws.com\"\n },\n \"Action\": \"s3:PutObject\",\n \"Resource\":\n (\"arn:aws:s3:::%s/var/log/elb/*\" % s3_logs_bucket),\n \"Condition\": {\n \"StringEquals\": {\n \"s3:x-amz-acl\": \"bucket-owner-full-control\"\n }\n }\n }, {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"delivery.logs.amazonaws.com\"\n },\n \"Action\": \"s3:GetBucketAcl\",\n \"Resource\": \"arn:aws:s3:::%s\" % s3_logs_bucket\n }]\n }))\n\n if s3_uploads_bucket:\n try:\n resp = s3_client.create_bucket(\n ACL='private',\n Bucket=s3_uploads_bucket,\n CreateBucketConfiguration={\n 'LocationConstraint': region_name\n })\n LOGGER.info(\"%s created S3 bucket for uploads %s\",\n tag_prefix, s3_uploads_bucket)\n except botocore.exceptions.ClientError as err:\n LOGGER.info(\"%s found S3 bucket for uploads %s\",\n tag_prefix, s3_uploads_bucket)\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'BucketAlreadyOwnedByYou':\n raise\n\n # Create instance profiles ...\n iam_client = boto3.client('iam')\n # ... for webfront instances\n create_instance_profile(\n create_gate_role(gate_name,\n s3_logs_bucket=s3_logs_bucket, s3_uploads_bucket=s3_uploads_bucket,\n iam_client=iam_client, tag_prefix=tag_prefix),\n iam_client=iam_client, region_name=region_name,\n tag_prefix=tag_prefix, dry_run=dry_run)\n # ... for databases instances\n create_instance_profile(\n create_vault_role(vault_name,\n s3_logs_bucket=s3_logs_bucket, s3_uploads_bucket=s3_uploads_bucket,\n iam_client=iam_client, tag_prefix=tag_prefix),\n iam_client=iam_client, region_name=region_name,\n tag_prefix=tag_prefix, dry_run=dry_run)\n\n if ssh_key_name:\n if not ssh_key_content:\n ssh_key_path = os.path.join(os.getenv('HOME'),\n '.ssh', '%s.pub' % ssh_key_name)\n if os.path.exists(ssh_key_path):\n with open(ssh_key_path, 'rb') as ssh_key_obj:\n ssh_key_content = ssh_key_obj.read()\n else:\n LOGGER.warning(\"%s no content for SSH key %s\",\n tag_prefix, ssh_key_name)\n # import SSH keys\n try:\n resp = ec2_client.import_key_pair(\n DryRun=dry_run,\n KeyName=ssh_key_name,\n PublicKeyMaterial=ssh_key_content)\n LOGGER.info(\"%s imported SSH key %s\", tag_prefix, ssh_key_name)\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidKeyPair.Duplicate':\n raise\n LOGGER.info(\"%s found SSH key %s\", tag_prefix, ssh_key_name)\n\n # ... 
for sally instances\n create_instance_profile(\n create_logs_role(kitchen_door_name,\n s3_logs_bucket=s3_logs_bucket,\n iam_client=iam_client, tag_prefix=tag_prefix),\n iam_client=iam_client, region_name=region_name,\n tag_prefix=tag_prefix, dry_run=dry_run)\n\n # allows SSH connection to instances for debugging\n update_kitchen_door_rules = (not kitchen_door_sg_id)\n if not kitchen_door_sg_id:\n resp = ec2_client.create_security_group(\n Description='%s SSH access' % tag_prefix,\n GroupName=kitchen_door_name,\n VpcId=vpc_id,\n DryRun=dry_run)\n kitchen_door_sg_id = resp['GroupId']\n LOGGER.info(\"%s created %s security group %s\",\n tag_prefix, kitchen_door_name, kitchen_door_sg_id)\n\n if update_kitchen_door_rules:\n try:\n if sally_ip:\n cidr_block = '%s/32' % sally_ip\n else:\n LOGGER.warning(\"no IP range was specified to restrict\"\\\n \" access to SSH port\")\n cidr_block = '0.0.0.0/0'\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=kitchen_door_sg_id,\n CidrIp=cidr_block,\n IpProtocol='tcp',\n FromPort=22,\n ToPort=22)\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = ec2_client.authorize_security_group_egress(\n DryRun=dry_run,\n GroupId=kitchen_door_sg_id,\n IpPermissions=[{\n 'IpProtocol': '-1',\n 'IpRanges': [{\n 'CidrIp': '0.0.0.0/0',\n }]}])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=gate_sg_id,\n IpPermissions=[{\n 'IpProtocol': 'tcp',\n 'FromPort': 22,\n 'ToPort': 22,\n 'UserIdGroupPairs': [{'GroupId': kitchen_door_sg_id}]\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=vault_sg_id,\n IpPermissions=[{\n 'IpProtocol': 'tcp',\n 'FromPort': 22,\n 'ToPort': 22,\n 'UserIdGroupPairs': [{'GroupId': kitchen_door_sg_id}]\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n\n # Creates encryption keys (KMS) in region\n if not storage_enckey:\n storage_enckey = _get_or_create_storage_enckey(\n region_name, tag_prefix, dry_run=dry_run)\n\n # Create an Application ELB and WAF\n load_balancer_arn = create_elb(\n tag_prefix, web_subnet_by_cidrs, moat_sg_id,\n s3_logs_bucket=s3_logs_bucket,\n tls_priv_key=tls_priv_key, tls_fullchain_cert=tls_fullchain_cert,\n region_name=region_name)\n create_waf(\n tag_prefix,\n elb_arn=load_balancer_arn,\n s3_logs_bucket=s3_logs_bucket,\n region_name=region_name,\n dry_run=dry_run)", "def fusion_api_delete_ethernet_network(self, name=None, uri=None, param='', api=None, headers=None):\n return self.ethernet_network.delete(name=name, uri=uri, param=param, api=api, headers=headers)", "def test_replace_cluster_network(self):\n pass", "def test_routing_ip_prefix_uninstall(self):\n self._common_uninstall_external_and_unintialized(\n 'some_id', routing_ip_prefix.delete,\n {'prefix': {}}\n )", "def network_create_end(self, payload):\n network_id = payload['network']['id']\n self.enable_dhcp_helper(network_id)", "def test_ipam_ip_addresses_delete(self):\n pass", "def mac_pool_remove(handle, name, 
parent_dn=\"org-root\"):\r\n dn = parent_dn + '/mac-pool-' + name\r\n mo = handle.query_dn(dn)\r\n if mo:\r\n handle.remove_mo(mo)\r\n handle.commit()\r\n else:\r\n raise ValueError(\"MAC Pool is not available\")", "def release_floating_ip(self, context, address,\n affect_auto_assigned=False):\n floating_ip = self.db.floating_ip_get_by_address(context, address)\n if floating_ip['fixed_ip']:\n raise exception.ApiError(_('Floating ip is in use. '\n 'Disassociate it before releasing.'))\n if not affect_auto_assigned and floating_ip.get('auto_assigned'):\n return\n # NOTE(vish): We don't know which network host should get the ip\n # when we deallocate, so just send it to any one. This\n # will probably need to move into a network supervisor\n # at some point.\n rpc.cast(context,\n FLAGS.network_topic,\n {'method': 'deallocate_floating_ip',\n 'args': {'floating_address': floating_ip['address']}})", "def test_networking_project_network_delete(self):\n pass", "def release(self, floating_ip_id):\n self.client.delete_floatingip(floating_ip_id)", "def test_delete_cluster_network(self):\n pass", "def network_delete_end(self, payload):\n self.disable_dhcp_helper(payload['network_id'])", "def delete_endpoint(EndpointName=None):\n pass", "def post_floating_ip_delete(self, resource_id, resource_dict):\n pass", "def testPutNetworkLocalIp(self):\n models.System.objects.all().delete()\n self._saveSystem()\n old_count = models.Network.objects.count()\n self._put('inventory/networks/1/',\n data=testsxml.network_put_xml_opt_ip_addr % \"169.254.4.4\",\n username=\"admin\", password=\"password\")\n self.assertEquals(old_count, models.Network.objects.count())\n\n self._put('inventory/networks/1/',\n data=testsxml.network_put_xml_opt_ip_addr % \"4.4.4.4\",\n username=\"admin\", password=\"password\")\n self.assertEquals(old_count + 1, models.Network.objects.count())", "def delete_network(self, network):\r\n return self.delete(self.network_path % (network))", "def post_subnet_create(self, resource_dict):\n pass", "def create_public_ip(self):\n raise NotImplementedError", "def destroy(self, network, device_name):\n if self.conf.use_namespaces:\n namespace = NS_PREFIX + network.id\n else:\n namespace = None\n\n self.driver.unplug(device_name, namespace=namespace)\n\n self.plugin.release_dhcp_port(network.id,\n self.get_device_id(network))", "def post_network_ipam_update(self, resource_id, resource_dict):\n pass", "def run(self, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.delete_network(network[\"id\"])", "def subnet_create(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.create_subnet(**kwargs)", "def network_update_end(self, payload):\n network_id = payload['network']['id']\n if payload['network']['admin_state_up']:\n self.enable_dhcp_helper(network_id)\n else:\n self.disable_dhcp_helper(network_id)", "def _disassociate_floating_ip(self, context, address, interface,\n instance_uuid):\n interface = CONF.public_interface or interface\n\n @utils.synchronized(six.text_type(address))\n def do_disassociate():\n # NOTE(vish): Note that we are disassociating in the db before we\n # actually remove the ip address on the host. We are\n # safe from races on this host due to the decorator,\n # but another host might grab the ip right away. 
We\n # don't worry about this case because the minuscule\n # window where the ip is on both hosts shouldn't cause\n # any problems.\n floating = objects.FloatingIP.disassociate(context, address)\n fixed = floating.fixed_ip\n if not fixed:\n # NOTE(vish): ip was already disassociated\n return\n if interface:\n # go go driver time\n self.l3driver.remove_floating_ip(address, fixed.address,\n interface, fixed.network)\n payload = dict(project_id=context.project_id,\n instance_id=instance_uuid,\n floating_ip=address)\n self.notifier.info(context,\n 'network.floating_ip.disassociate', payload)\n do_disassociate()", "def test_networking_project_network_tag_delete(self):\n pass", "def test_replace_namespaced_egress_network_policy(self):\n pass", "def delete_network(options, vsm_obj):\n print(\"Disconnecting edge interface attached to this network\")\n edge_id = get_edge(vsm_obj)\n edge = Edge(vsm_obj, '4.0')\n edge.id = edge_id\n vnics = Vnics(edge)\n vnics_schema = vnics.query()\n network = get_network_id(options, get_network_name_on_vc(options))\n for vnic in vnics_schema.vnics:\n if network and vnic.portgroupId == network:\n print(\"Found a matching vnic %s %s\" % (options.name, vnic.index))\n vnic.isConnected = \"False\"\n vnic.portgroupId = None\n vnic.name = \"vnic%s\" % vnic.index\n vnics_schema = VnicsSchema()\n vnics_schema.vnics = [vnic]\n result = vnics.create(vnics_schema)\n if (result[0].response.status != 204):\n print \"update vnic error: %s %s\" \\\n % (result[0].response.status, result[0].response.reason)\n return False\n else:\n break\n else:\n print (\"No matching vnic found\")\n\n vdn_scope = get_transport_zone(options)\n virtual_wire = VirtualWire(vdn_scope)\n vwire = virtual_wire.read_by_name(get_network_name(options))\n name = get_network_name(options)\n if vwire != \"FAILURE\":\n print(\"Found a matching network %s\" % (options.name))\n virtual_wire.id = vwire.objectId\n result = virtual_wire.delete()\n if (result.response.status != 200):\n print (\"Delete vwire error: %s\" % result.response.reason)\n return False\n else:\n print (\"No matching network found\")\n print(\"Network %s deleted\" % (options.name))\n\n return True", "def rm_network(c):\n print('Stopping local test network and removing containers')\n with c.cd('images'):\n c.run('sudo docker-compose down -v', hide='stderr')\n\n c.run('sudo rm -rf volumes/stellar-core/opt/stellar-core/buckets')\n c.run('sudo rm -f volumes/stellar-core/opt/stellar-core/*.log')\n c.run('sudo rm -rf volumes/stellar-core/tmp')", "def unplug_port_from_network(self, device_id, device_owner, hostname,\n port_id, network_id, tenant_id, sg, vnic_type,\n switch_bindings=None, segments=None):", "def launch(\n *,\n key_name: Optional[str],\n instance_type: str,\n ami: str,\n ami_user: str,\n tags: Dict[str, str],\n display_name: Optional[str] = None,\n size_gb: int,\n security_group_name: str,\n instance_profile: Optional[str],\n nonce: str,\n delete_after: datetime.datetime,\n) -> Instance:\n\n if display_name:\n tags[\"Name\"] = display_name\n tags[\"scratch-delete-after\"] = str(delete_after.timestamp())\n tags[\"nonce\"] = nonce\n tags[\"git_ref\"] = git.describe()\n tags[\"ami-user\"] = ami_user\n\n ec2 = boto3.client(\"ec2\")\n groups = ec2.describe_security_groups()\n security_group_id = None\n for group in groups[\"SecurityGroups\"]:\n if group[\"GroupName\"] == security_group_name:\n security_group_id = group[\"GroupId\"]\n break\n\n if security_group_id is None:\n vpcs = ec2.describe_vpcs()\n vpc_id = None\n for vpc in 
vpcs[\"Vpcs\"]:\n if vpc[\"IsDefault\"] == True:\n vpc_id = vpc[\"VpcId\"]\n break\n if vpc_id is None:\n default_vpc = ec2.create_default_vpc()\n vpc_id = default_vpc[\"Vpc\"][\"VpcId\"]\n securitygroup = ec2.create_security_group(\n GroupName=security_group_name,\n Description=\"Allows all.\",\n VpcId=vpc_id,\n )\n security_group_id = securitygroup[\"GroupId\"]\n ec2.authorize_security_group_ingress(\n GroupId=security_group_id,\n CidrIp=\"0.0.0.0/0\",\n IpProtocol=\"tcp\",\n FromPort=22,\n ToPort=22,\n )\n\n network_interface: InstanceNetworkInterfaceSpecificationTypeDef = {\n \"AssociatePublicIpAddress\": True,\n \"DeviceIndex\": 0,\n \"Groups\": [security_group_id],\n }\n\n say(f\"launching instance {display_name or '(unnamed)'}\")\n with open(ROOT / \"misc\" / \"scratch\" / \"provision.bash\") as f:\n provisioning_script = f.read()\n kwargs: RunInstancesRequestRequestTypeDef = {\n \"MinCount\": 1,\n \"MaxCount\": 1,\n \"ImageId\": ami,\n \"InstanceType\": cast(InstanceTypeType, instance_type),\n \"UserData\": provisioning_script,\n \"TagSpecifications\": [\n {\n \"ResourceType\": \"instance\",\n \"Tags\": [{\"Key\": k, \"Value\": v} for (k, v) in tags.items()],\n }\n ],\n \"NetworkInterfaces\": [network_interface],\n \"BlockDeviceMappings\": [\n {\n \"DeviceName\": \"/dev/sda1\",\n \"Ebs\": {\n \"VolumeSize\": size_gb,\n \"VolumeType\": \"gp3\",\n },\n }\n ],\n \"MetadataOptions\": {\n # Allow Docker containers to access IMDSv2.\n \"HttpPutResponseHopLimit\": 2,\n },\n }\n if key_name:\n kwargs[\"KeyName\"] = key_name\n if instance_profile:\n kwargs[\"IamInstanceProfile\"] = {\"Name\": instance_profile}\n i = boto3.resource(\"ec2\").create_instances(**kwargs)[0]\n\n return i", "def setNetGroup(addr): #status: Done, not tested\r\n pass", "def test_delete_collection_namespaced_egress_network_policy(self):\n pass", "def net_undefine(network, server, virt=\"Xen\"):\n\n cmd = \"virsh -c %s net-undefine %s 2>/dev/null\" % (virt2uri(virt), network)\n ret, out = run_remote(server, cmd)\n\n return ret", "def test_create_network_and_subnet(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 254\n self.__create_network_and_subnet_test_helper__(network_name, network_cidr)", "def network(self):\n address = unicode(\"%s/%s\" % (self.address, _get_cidr(self.netmask)))\n return IPv4Network(address, strict=False)", "def post_virtual_network_update(self, resource_id, resource_dict):\n pass", "def test_delete_namespaced_egress_network_policy(self):\n pass", "def test_delete_network(self):\n pass", "def unlink(address):", "def delete_network_profile(arn=None):\n pass", "def proxy(self):\n result = self.instances(role='stateless-body', format=\"PrivateIpAddress\")\n return result[0][0] if result else None", "def post_subnet_update(self, resource_id, resource_dict):\n pass", "def test_networking_project_network_update(self):\n pass", "def post_virtual_network_create(self, resource_dict):\n pass" ]
[ "0.60287136", "0.59219706", "0.555398", "0.5511613", "0.54330933", "0.5428225", "0.5323572", "0.5283788", "0.5276548", "0.51867646", "0.51824623", "0.5156002", "0.5132923", "0.5113544", "0.5092657", "0.5072642", "0.50216955", "0.4976981", "0.49680826", "0.49609733", "0.49414968", "0.48767704", "0.4873276", "0.48498288", "0.4838882", "0.4829115", "0.48182043", "0.48156497", "0.47867218", "0.47809005", "0.4763634", "0.4754242", "0.47502664", "0.4747155", "0.4741395", "0.4738247", "0.47308758", "0.4712483", "0.46967345", "0.46963295", "0.4692328", "0.4689062", "0.46879452", "0.46867636", "0.4682933", "0.46708584", "0.46575424", "0.46574804", "0.46465337", "0.4628688", "0.46239796", "0.46184325", "0.46181652", "0.4613814", "0.46112078", "0.4608405", "0.46034762", "0.45987776", "0.45892695", "0.4587805", "0.45877483", "0.45832485", "0.4583196", "0.45819354", "0.4578111", "0.45774752", "0.45683455", "0.45636573", "0.4557039", "0.4550361", "0.45477542", "0.4546314", "0.45457926", "0.45451862", "0.45448107", "0.45432955", "0.4535168", "0.45245528", "0.45177135", "0.45168558", "0.45155647", "0.45144904", "0.4512743", "0.45123014", "0.4510839", "0.45091224", "0.45046446", "0.4495404", "0.44917664", "0.44906375", "0.44878718", "0.44833416", "0.44790924", "0.44790268", "0.4474562", "0.44735518", "0.44711378", "0.44674352", "0.44672686", "0.44659948", "0.4465427" ]
0.0
-1
This operation is applicable only to replica set instances; it is not supported for standalone instances or sharded cluster instances. > If you have applied for a public endpoint for the instance, you must first call the [ReleasePublicNetworkAddress](~~67604~~) operation to release the public endpoint.
async def migrate_to_other_zone_async( self, request: dds_20151201_models.MigrateToOtherZoneRequest, ) -> dds_20151201_models.MigrateToOtherZoneResponse: runtime = util_models.RuntimeOptions() return await self.migrate_to_other_zone_with_options_async(request, runtime)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def release_address(self, public_ip=None, allocation_id=None):\r\n params = {}\r\n\r\n if public_ip is not None:\r\n params['PublicIp'] = public_ip\r\n elif allocation_id is not None:\r\n params['AllocationId'] = allocation_id\r\n\r\n return self.get_status('ReleaseAddress', params, verb='POST')", "def release_eip_address(\n public_ip=None, allocation_id=None, region=None, key=None, keyid=None, profile=None\n):\n if not salt.utils.data.exactly_one((public_ip, allocation_id)):\n raise SaltInvocationError(\n \"Exactly one of 'public_ip' OR 'allocation_id' must be provided\"\n )\n\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n try:\n return conn.release_address(public_ip, allocation_id)\n except boto.exception.BotoServerError as e:\n log.error(e)\n return False", "def test_deploy_instance_with_new_network(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 252\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr)", "def disassociate_elastic_ip(ElasticIp=None):\n pass", "def release_node_private_network_address_with_options(\n self,\n request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.network_type):\n query['NetworkType'] = request.network_type\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ReleaseNodePrivateNetworkAddress',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse(),\n self.call_api(params, req, runtime)\n )", "def test_replace_host_subnet(self):\n pass", "def post_instance_ip_delete(self, resource_id, resource_dict):\n pass", "def remove_fixed_ip_from_instance(self, context, instance_id, address):\n args = {'instance_id': instance_id,\n 'address': address}\n rpc.cast(context, FLAGS.network_topic,\n {'method': 'remove_fixed_ip_from_instance',\n 'args': args})", "def release_node_private_network_address(\n self,\n request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,\n ) -> dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:\n runtime = util_models.RuntimeOptions()\n return self.release_node_private_network_address_with_options(request, runtime)", "async def release_node_private_network_address_with_options_async(\n 
self,\n request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.network_type):\n query['NetworkType'] = request.network_type\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ReleaseNodePrivateNetworkAddress',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def test_patch_host_subnet(self):\n pass", "def test_deploy_instance_with_networks_and_e2e_connection_using_public_ip(self):\n\n # skip test if suite couldn't start from an empty, clean list of allocated IPs (to avoid cascading failures)\n if self.suite_world['allocated_ips']:\n self.skipTest(\"There were pre-existing, not deallocated IPs\")\n\n # Allocate an IP\n allocated_ip = self.__allocate_ip_test_helper__()\n\n # Create Keypair\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n keypair_name = TEST_KEYPAIR_PREFIX + \"_\" + suffix\n private_keypair_value = self.__create_keypair_test_helper__(keypair_name)\n\n # Create Router with an external network gateway\n router_name = TEST_ROUTER_PREFIX + \"_e2e_\" + suffix\n external_network_id = self.__get_external_network_test_helper__()\n router_id = self.__create_router_test_helper__(router_name, external_network_id)\n\n # Create Network\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 246\n network_id, subnet_id = self.__create_network_and_subnet_test_helper__(network_name, network_cidr)\n\n # Add interface to router\n port_id = self.neutron_operations.add_interface_router(router_id, subnet_id)\n self.test_world['ports'].append(port_id)\n\n # Deploy VM (it will have only one IP from the Public Pool)\n instance_name = TEST_SERVER_PREFIX + \"_e2e_\" + suffix\n keypair_name = TEST_KEYPAIR_PREFIX + \"_\" + suffix\n sec_group_name = TEST_SEC_GROUP_PREFIX + \"_\" + suffix\n server_id = self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name, is_network_new=False,\n keypair_name=keypair_name, is_keypair_new=False,\n sec_group_name=sec_group_name)\n\n # Associate the public IP to Server\n self.nova_operations.add_floating_ip_to_instance(server_id=server_id, ip_address=allocated_ip)\n\n # SSH Connection\n self.__ssh_connection_test_helper__(host=allocated_ip, private_key=private_keypair_value)", "def test_delete_host_subnet(self):\n pass", "def 
test_deploy_instance_with_network_and_associate_public_ip(self):\n\n # skip test if suite couldn't start from an empty, clean list of allocated IPs (to avoid cascading failures)\n if self.suite_world['allocated_ips']:\n self.skipTest(\"There were pre-existing, not deallocated IPs\")\n\n # Allocate IP\n allocated_ip = self.__allocate_ip_test_helper__()\n\n # Create Router with an external network gateway\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n router_name = TEST_ROUTER_PREFIX + \"_public_ip_\" + suffix\n external_network_id = self.__get_external_network_test_helper__()\n router_id = self.__create_router_test_helper__(router_name, external_network_id)\n\n # Create Network\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 247\n network_id, subnet_id = self.__create_network_and_subnet_test_helper__(network_name, network_cidr)\n\n # Add interface to router\n port_id = self.neutron_operations.add_interface_router(router_id, subnet_id)\n self.test_world['ports'].append(port_id)\n\n # Deploy VM (it will have only one IP from the Public Pool)\n instance_name = TEST_SERVER_PREFIX + \"_public_ip_\" + suffix\n server_id = self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name, is_network_new=False)\n\n # Associate Public IP to Server\n self.nova_operations.add_floating_ip_to_instance(server_id=server_id, ip_address=allocated_ip)", "def test_delete_collection_host_subnet(self):\n pass", "def release_port_fixed_ip(self, network_id, device_id, subnet_id):\n return self.call(self.context,\n self.make_msg('release_port_fixed_ip',\n network_id=network_id,\n subnet_id=subnet_id,\n device_id=device_id,\n host=self.host),\n topic=self.topic)", "def test_deploy_instance_with_new_network_and_keypair(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_keypair_\" + suffix\n keypair_name = TEST_KEYPAIR_PREFIX + \"_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 250\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr,\n keypair_name=keypair_name)", "def deregister_elastic_ip(ElasticIp=None):\n pass", "def post_network_ipam_delete(self, resource_id, resource_dict):\n pass", "def subnet_update(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.update_subnet(**kwargs)", "def disassociate(self, endpoint_name=None, instance_id=None):\n if instance_id is None:\n raise Exception(\"Instance required!\")\n if endpoint_name is None:\n self.request('/v1.1/endpoint/instances/%s' % instance_id, 'DELETE')\n else:\n self.request('/v1.1/endpoints/%s/instances/%s' %\n (endpoint_name, instance_id), 'DELETE')", "def remove_gateway(self, network_ref):\n raise NotImplementedError()", "def post_instance_ip_create(self, resource_dict):\n pass", "def remove_ipv4_address(self, net_interface, address):\n self._runner.run('ip addr del %s dev %s' % (address, net_interface))", "def disassociate_resolver_endpoint_ip_address(ResolverEndpointId=None, IpAddress=None):\n pass", "def _post_task_update_advertise_address():\n default_network_interface = None\n\n with open(KUBE_APISERVER_CONFIG) as f:\n lines = f.read()\n m = re.search(REGEXPR_ADVERTISE_ADDRESS, lines)\n if m:\n default_network_interface = m.group(1)\n LOG.debug(' default_network_interface = %s', default_network_interface)\n\n if advertise_address and 
default_network_interface \\\n and advertise_address != default_network_interface:\n cmd = [\"sed\", \"-i\", \"/oidc-issuer-url/! s/{}/{}/g\".format(default_network_interface, advertise_address),\n KUBE_APISERVER_CONFIG]\n _ = _exec_cmd(cmd)", "def test_create_host_subnet(self):\n pass", "def change_address(\n vm_hostname, new_address,\n offline=False, migrate=False, allow_reserved_hv=False,\n offline_transport='drbd',\n):\n\n if not offline:\n raise IGVMError('IP address change can be only performed offline')\n\n with _get_vm(vm_hostname) as vm:\n if vm.dataset_obj['datacenter_type'] != 'kvm.dct':\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n new_address = ip_address(new_address)\n\n if vm.dataset_obj['intern_ip'] == new_address:\n raise ConfigError('New IP address is the same as the old one!')\n\n if not vm.hypervisor.get_vlan_network(new_address) and not migrate:\n err = 'Current hypervisor does not support new subnet!'\n raise ConfigError(err)\n\n new_network = Query(\n {\n 'servertype': 'route_network',\n 'state': 'online',\n 'network_type': 'internal',\n 'intern_ip': Contains(new_address),\n }\n ).get()['hostname']\n\n vm_was_running = vm.is_running()\n\n with Transaction() as transaction:\n if vm_was_running:\n vm.shutdown(\n transaction=transaction,\n check_vm_up_on_transaction=False,\n )\n vm.change_address(\n new_address, new_network, transaction=transaction,\n )\n\n if migrate:\n vm_migrate(\n vm_object=vm,\n run_puppet=True,\n offline=True,\n no_shutdown=True,\n allow_reserved_hv=allow_reserved_hv,\n offline_transport=offline_transport,\n )\n else:\n vm.hypervisor.mount_vm_storage(vm, transaction=transaction)\n vm.run_puppet()\n vm.hypervisor.redefine_vm(vm)\n vm.hypervisor.umount_vm_storage(vm)\n\n if vm_was_running:\n vm.start()", "def network_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_network(**kwargs)", "def test_07_associate_public_ip(self):\n # Validate the following\n # 1. Create a project\n # 2. Add some public Ips to the project\n # 3. 
Verify public IP assigned can only used to create PF/LB rules\n # inside project\n\n networks = Network.list(\n self.apiclient,\n projectid=self.project.id,\n listall=True\n )\n self.assertEqual(\n isinstance(networks, list),\n True,\n \"Check list networks response returns a valid response\"\n )\n self.assertNotEqual(\n len(networks),\n 0,\n \"Check list networks response returns a valid network\"\n )\n network = networks[0]\n self.debug(\"Associating public IP for project: %s\" % self.project.id)\n public_ip = PublicIPAddress.create(\n self.apiclient,\n zoneid=self.virtual_machine.zoneid,\n services=self.services[\"server\"],\n networkid=network.id,\n projectid=self.project.id\n )\n self.cleanup.append(public_ip)\n\n #Create NAT rule\n self.debug(\n \"Creating a NAT rule within project, VM ID: %s\" %\n self.virtual_machine.id)\n nat_rule = NATRule.create(\n self.apiclient,\n self.virtual_machine,\n self.services[\"natrule\"],\n public_ip.ipaddress.id,\n projectid=self.project.id\n )\n self.debug(\"created a NAT rule with ID: %s\" % nat_rule.id)\n nat_rule_response = NATRule.list(\n self.apiclient,\n id=nat_rule.id\n )\n self.assertEqual(\n isinstance(nat_rule_response, list),\n True,\n \"Check list response returns a valid list\"\n )\n self.assertNotEqual(\n len(nat_rule_response),\n 0,\n \"Check Port Forwarding Rule is created\"\n )\n self.assertEqual(\n nat_rule_response[0].id,\n nat_rule.id,\n \"Check Correct Port forwarding Rule is returned\"\n )\n\n #Create Load Balancer rule and assign VMs to rule\n self.debug(\"Created LB rule for public IP: %s\" %\n public_ip.ipaddress)\n lb_rule = LoadBalancerRule.create(\n self.apiclient,\n self.services[\"lbrule\"],\n public_ip.ipaddress.id,\n projectid=self.project.id\n )\n self.debug(\"Assigning VM: %s to LB rule: %s\" % (\n self.virtual_machine.name,\n lb_rule.id\n ))\n lb_rule.assign(self.apiclient, [self.virtual_machine])\n\n lb_rules = list_lb_rules(\n self.apiclient,\n id=lb_rule.id\n )\n self.assertEqual(\n isinstance(lb_rules, list),\n True,\n \"Check list response returns a valid list\"\n )\n #verify listLoadBalancerRules lists the added load balancing rule\n self.assertNotEqual(\n len(lb_rules),\n 0,\n \"Check Load Balancer Rule in its List\"\n )\n self.assertEqual(\n lb_rules[0].id,\n lb_rule.id,\n \"Check List Load Balancer Rules returns valid Rule\"\n )\n\n #Create Firewall rule with configurations from settings file\n fw_rule = FireWallRule.create(\n self.apiclient,\n ipaddressid=public_ip.ipaddress.id,\n protocol='TCP',\n cidrlist=[self.services[\"fw_rule\"][\"cidr\"]],\n startport=self.services[\"fw_rule\"][\"startport\"],\n endport=self.services[\"fw_rule\"][\"endport\"],\n projectid=self.project.id\n )\n self.debug(\"Created firewall rule: %s\" % fw_rule.id)\n\n # After Router start, FW rule should be in Active state\n fw_rules = FireWallRule.list(\n self.apiclient,\n id=fw_rule.id,\n )\n self.assertEqual(\n isinstance(fw_rules, list),\n True,\n \"Check for list FW rules response return valid data\"\n )\n\n self.assertEqual(\n fw_rules[0].state,\n 'Active',\n \"Check list load balancing rules\"\n )\n self.assertEqual(\n fw_rules[0].startport,\n self.services[\"fw_rule\"][\"startport\"],\n \"Check start port of firewall rule\"\n )\n\n self.assertEqual(\n fw_rules[0].endport,\n self.services[\"fw_rule\"][\"endport\"],\n \"Check end port of firewall rule\"\n )\n\n self.debug(\"Deploying VM for account: %s\" % self.account.name)\n virtual_machine_1 = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n 
templateid=self.template.id,\n accountid=self.account.name,\n domainid=self.account.domainid,\n serviceofferingid=self.service_offering.id,\n )\n self.cleanup.append(virtual_machine_1)\n\n self.debug(\"VM state after deploy: %s\" % virtual_machine_1.state)\n # Verify VM state\n self.assertEqual(\n virtual_machine_1.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n self.debug(\"Creating NAT rule for VM (ID: %s) outside project\" %\n virtual_machine_1.id)\n with self.assertRaises(Exception):\n NATRule.create(\n self.apiclient,\n virtual_machine_1,\n self.services[\"natrule\"],\n public_ip.ipaddress.id,\n )\n\n self.debug(\"Creating LB rule for public IP: %s outside project\" %\n public_ip.ipaddress)\n with self.assertRaises(Exception):\n LoadBalancerRule.create(\n self.apiclient,\n self.services[\"lbrule\"],\n public_ip.ipaddress.id,\n accountid=self.account.name\n )\n return", "def subnet(template, name, vpc, availability_zone='eu-west-1a', cidr='10.0.36.0/24', gateway=None, nat=None,\n map_public_ip=False, acl_table=None):\n s = Subnet(name, template=template)\n s.Tags = Tags(Name=aws_name(s.title))\n s.VpcId = Ref(vpc)\n s.CidrBlock = cidr\n s.MapPublicIpOnLaunch = map_public_ip\n\n if availability_zone:\n s.AvailabilityZone = Ref(availability_zone)\n\n if gateway and nat:\n raise(RuntimeError(\"Don't provide an internet gateway (public) and nat gateway (private) at the same time.\"))\n\n # add public route if an internet gateway is given\n if gateway:\n # route table\n rt = RouteTable('{}RouteTable'.format(name), template=template)\n rt.Tags = Tags(Name=aws_name(rt.title))\n rt.VpcId = Ref(vpc)\n\n # route\n r = Route('{}Route'.format(name), template=template)\n r.DestinationCidrBlock = '0.0.0.0/0'\n r.GatewayId = Ref(gateway)\n # r.DependsOn = InternetGatewayAttachment.title\n r.RouteTableId = Ref(rt)\n\n # associate\n SubnetRouteTableAssociation('{}SubnetRouteTableAssociation'.format(name), template=template,\n RouteTableId=Ref(rt), SubnetId=Ref(s))\n\n # add nat route if an nat gateway is given\n if nat:\n # route table\n rt = RouteTable('{}RouteTable'.format(name), template=template)\n rt.Tags = Tags(Name=aws_name(rt.title))\n rt.VpcId = Ref(vpc)\n\n # route\n r = Route('{}Route'.format(name), template=template)\n r.DestinationCidrBlock = '0.0.0.0/0'\n r.NatGatewayId = Ref(nat)\n # r.DependsOn = InternetGatewayAttachment.title\n r.RouteTableId = Ref(rt)\n\n # associate\n SubnetRouteTableAssociation('{}SubnetRouteTableAssociation'.format(name), template=template,\n RouteTableId=Ref(rt), SubnetId=Ref(s))\n\n # add acl table if one is provided. 
Defaults to vpc default acl if None is provided\n if acl_table:\n at = SubnetNetworkAclAssociation('{}SubnetAclTableAssociation'.format(name), template=template)\n at.SubnetId = Ref(s)\n at.NetworkAclId = Ref(acl_table)\n\n return s", "def create_network(client, overwrite_net=False, network_name=DOCK_NETWORK_NAME, subnetwork=DOCK_NETWORK_SUBNET,\n gw=DOCK_NETWORK_GW):\n\n if overwrite_net:\n try:\n client.networks.get(network_name).remove()\n logging.info(\" Overwriting existing network\")\n except docker.errors.APIError:\n logging.info(\" Warning: Couldn't find network to overwrite (does it exist?)\")\n\n ipam_pool = docker.types.IPAMPool(subnet=subnetwork, gateway=gw)\n ipam_config = docker.types.IPAMConfig(pool_configs=[ipam_pool])\n client.networks.create(network_name, driver=\"bridge\", ipam=ipam_config)", "def post_delete_subnet(self, sender, instance, **kwargs):\n RecurseNetworks.delete_entries(subnet=str(instance.ip_network), net_name=instance.name)", "def test_deploy_instance_with_new_network_and_sec_group(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_sec_group_\" + suffix\n sec_group_name = TEST_SEC_GROUP_PREFIX + \"_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 249\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr,\n sec_group_name=sec_group_name)", "def remove_ip(enode, portlbl, addr, shell=None):\n assert portlbl\n assert ip_interface(addr)\n port = enode.ports[portlbl]\n\n cmd = 'ip addr del {addr} dev {port}'.format(addr=addr, port=port)\n response = enode(cmd, shell=shell)\n assert not response", "def reset_network(self, instance):\n LOG.debug(\"reset_network\")\n return", "def post_instance_ip_update(self, resource_id, resource_dict):\n pass", "def post_virtual_network_delete(self, resource_id, resource_dict):\n pass", "def update_network(**kwargs):\n\n ip_addr = kwargs.get('ip_addr')\n is_private = kwargs.get('is_private')\n name = kwargs.get('name')\n dns_names = kwargs.get('dns_names')\n is_scanning = kwargs.get('is_scanning', False)\n network_id = make_shortuuid(name)\n\n network = {\n 'dns_names': dns_names,\n 'ip_addr': ip_addr,\n 'is_private' : is_private,\n 'name': name,\n 'id': network_id,\n 'is_scanning': is_scanning,\n 'updated_count': 0\n\n }\n\n network_exists = r.table(\"networks\").insert([network], conflict=\"update\")\n\n return network_exists.run(conn)", "def migrate_contract(network):\n print(network)", "def detach_public_ip(self, name=None, ip=None):\n raise NotImplementedError", "def sgup(sg=\"sg_external_ssh\"):\n ip = os.popen(\"/usr/bin/curl ifconfig.co 2>/dev/null\").readline().strip()\n print(\"My Public IP is : \"+ip)\n client = boto3.client(\"ec2\")\n ippermissions = client.describe_security_groups(GroupNames = [ sg ])[\"SecurityGroups\"][0][\"IpPermissions\"]\n print(\"Revoking old IP from group \"+sg)\n client.revoke_security_group_ingress(GroupName = sg, IpPermissions = ippermissions)\n printr(\"Adding new IP to group \"+sg)\n client.authorize_security_group_ingress(GroupName=sg, IpProtocol=\"-1\", FromPort=0, ToPort=0, CidrIp=ip+\"/32\")", "def delete_public_ip(self, ip=None):\n raise NotImplementedError", "def post_virtual_ip_delete(self, resource_id, resource_dict):\n pass", "def add_fixed_ip_to_instance(self, context, instance_id, host, network_id):\n args = {'instance_id': instance_id,\n 'host': host,\n 'network_id': network_id}\n rpc.cast(context, 
FLAGS.network_topic,\n {'method': 'add_fixed_ip_to_instance',\n 'args': args})", "def post_subnet_delete(self, resource_id, resource_dict):\n pass", "def deallocate_for_instance(self, context, instance, **kwargs):\n args = kwargs\n args['instance_id'] = instance['id']\n args['project_id'] = instance['project_id']\n rpc.cast(context, FLAGS.network_topic,\n {'method': 'deallocate_for_instance',\n 'args': args})", "def subnet_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_subnet(**kwargs)", "def test_vpc_to_subnets(neo4j_session):\n _ensure_local_neo4j_has_test_vpc_data(neo4j_session)\n _ensure_local_neo4j_has_test_subnet_data(neo4j_session)\n query = \"\"\"\n MATCH(vpc:GCPVpc{id:$VpcId})-[:RESOURCE]->(subnet:GCPSubnet)\n RETURN vpc.id, subnet.id, subnet.region, subnet.gateway_address, subnet.ip_cidr_range,\n subnet.private_ip_google_access\n \"\"\"\n expected_vpc_id = 'projects/project-abc/global/networks/default'\n nodes = neo4j_session.run(\n query,\n VpcId=expected_vpc_id,\n )\n actual_nodes = {\n (\n n['vpc.id'],\n n['subnet.id'],\n n['subnet.region'],\n n['subnet.gateway_address'],\n n['subnet.ip_cidr_range'],\n n['subnet.private_ip_google_access'],\n ) for n in nodes\n }\n\n expected_nodes = {\n (\n 'projects/project-abc/global/networks/default',\n 'projects/project-abc/regions/europe-west2/subnetworks/default',\n 'europe-west2',\n '10.0.0.1',\n '10.0.0.0/20',\n False,\n ),\n }\n assert actual_nodes == expected_nodes", "def test_nic_to_subnets(neo4j_session):\n _ensure_local_neo4j_has_test_subnet_data(neo4j_session)\n _ensure_local_neo4j_has_test_instance_data(neo4j_session)\n subnet_query = \"\"\"\n MATCH (nic:GCPNetworkInterface{id:$NicId})-[:PART_OF_SUBNET]->(subnet:GCPSubnet)\n return nic.nic_id, nic.private_ip, subnet.id, subnet.gateway_address, subnet.ip_cidr_range\n \"\"\"\n nodes = neo4j_session.run(\n subnet_query,\n NicId='projects/project-abc/zones/europe-west2-b/instances/instance-1-test/networkinterfaces/nic0',\n )\n actual_nodes = {\n (\n n['nic.nic_id'],\n n['nic.private_ip'],\n n['subnet.id'],\n n['subnet.gateway_address'],\n n['subnet.ip_cidr_range'],\n ) for n in nodes\n }\n expected_nodes = {(\n 'projects/project-abc/zones/europe-west2-b/instances/instance-1-test/networkinterfaces/nic0',\n '10.0.0.3',\n 'projects/project-abc/regions/europe-west2/subnetworks/default',\n '10.0.0.1',\n '10.0.0.0/20',\n )}\n assert actual_nodes == expected_nodes", "def test_deploy_instance_with_new_network_and_metadata(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_metadata_\" + suffix\n instance_meta = {\"test_item\": \"test_value\"}\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 251\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr,\n metadata=instance_meta)", "def amazon_public_address() -> str:\n check_url = 'http://169.254.169.254/latest/meta-data/public-ipv4'\n check_timeout = float(CONFIG['network']['check_timeout'])\n try:\n with urllib.request.urlopen(\n url=check_url, timeout=check_timeout,\n ) as response:\n return response.read().decode().strip()\n except Exception as error:\n return None", "def test_patch_namespaced_egress_network_policy(self):\n pass", "async def release_node_private_network_address_async(\n self,\n request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,\n ) -> 
dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:\n runtime = util_models.RuntimeOptions()\n return await self.release_node_private_network_address_with_options_async(request, runtime)", "def pre_instance_ip_delete(self, resource_id):\n pass", "def create_network(\n self, is_internal: bool = True\n ) -> None:\n if self.network:\n self.log.warn(f\"Network {self.network_name} was already created!\")\n return\n\n existing_networks = self.docker.networks.list(\n names=[self.network_name]\n )\n if existing_networks:\n if len(existing_networks) > 1:\n self.log.error(\n f\"Found multiple ({len(existing_networks)}) existing \"\n f\"networks {self.network_name}. Please delete all or all \"\n \"but one before starting the server!\")\n exit(1)\n self.log.info(f\"Network {self.network_name} already exists! Using \"\n \"existing network\")\n self.network = existing_networks[0]\n self.network.reload() # required to initialize containers in netw\n else:\n self.network = self.docker.networks.create(\n self.network_name,\n driver=\"bridge\",\n internal=is_internal,\n scope=\"local\",\n )", "def disassociate_address(self, public_ip=None, association_id=None):\r\n params = {}\r\n\r\n if public_ip is not None:\r\n params['PublicIp'] = public_ip\r\n elif association_id is not None:\r\n params['AssociationId'] = association_id\r\n\r\n return self.get_status('DisassociateAddress', params, verb='POST')", "def create_network(region_name, vpc_cidr, tag_prefix,\n tls_priv_key=None, tls_fullchain_cert=None,\n ssh_key_name=None, ssh_key_content=None, sally_ip=None,\n s3_logs_bucket=None, s3_identities_bucket=None,\n storage_enckey=None,\n dry_run=False):\n sg_tag_prefix = tag_prefix\n\n LOGGER.info(\"Provisions network ...\")\n ec2_client = boto3.client('ec2', region_name=region_name)\n\n # Create a VPC\n vpc_id, vpc_cidr_read = _get_vpc_id(tag_prefix, ec2_client=ec2_client,\n region_name=region_name)\n if vpc_id:\n if vpc_cidr != vpc_cidr_read:\n raise RuntimeError(\n \"%s cidr block for VPC is %s while it was expected to be %s\" %\n (tag_prefix, vpc_cidr_read, vpc_cidr))\n else:\n if not vpc_cidr:\n raise RuntimeError(\n \"%s could not find VPC and no cidr block is specified\"\\\n \" to create one.\" % tag_prefix)\n resp = ec2_client.create_vpc(\n DryRun=dry_run,\n CidrBlock=vpc_cidr,\n AmazonProvidedIpv6CidrBlock=False,\n InstanceTenancy='default')\n vpc_id = resp['Vpc']['VpcId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[vpc_id],\n Tags=[\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\", 'Value': \"%s-vpc\" % tag_prefix}])\n LOGGER.info(\"%s created VPC %s\", tag_prefix, vpc_id)\n\n # Create subnets for app, dbs and web services\n # ELB will require that there is at least one subnet per availability zones.\n # RDS will require that there is at least two subnets for databases.\n resp = ec2_client.describe_availability_zones()\n zones = {(zone['ZoneId'], zone['ZoneName'])\n for zone in resp['AvailabilityZones']}\n web_subnet_cidrs, dbs_subnet_cidrs, app_subnet_cidrs = _split_cidrs(\n vpc_cidr, zones=zones, region_name=region_name)\n\n LOGGER.info(\"%s provisioning web subnets...\", tag_prefix)\n web_zones = set([])\n web_subnet_by_cidrs = _get_subnet_by_cidrs(\n web_subnet_cidrs, tag_prefix, vpc_id=vpc_id, ec2_client=ec2_client)\n for cidr_block, subnet in web_subnet_by_cidrs.items():\n if subnet:\n web_zones |= {\n (subnet['AvailabilityZoneId'], subnet['AvailabilityZone'])}\n for cidr_block, subnet in web_subnet_by_cidrs.items():\n if not subnet:\n available_zones = zones - 
web_zones\n zone_id, zone_name = available_zones.pop()\n try:\n resp = ec2_client.create_subnet(\n AvailabilityZoneId=zone_id,\n CidrBlock=cidr_block,\n VpcId=vpc_id,\n TagSpecifications=[{\n 'ResourceType': 'subnet',\n 'Tags': [\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s %s web\" % (tag_prefix, zone_name)}]}],\n DryRun=dry_run)\n subnet = resp['Subnet']\n web_subnet_by_cidrs[cidr_block] = subnet\n web_zones |= set([(zone_id, zone_name)])\n subnet_id = subnet['SubnetId']\n LOGGER.info(\"%s created subnet %s in zone %s for cidr %s\",\n tag_prefix, subnet_id, zone_name, cidr_block)\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidSubnet.Conflict':\n raise\n # We have a conflict, let's just skip over it.\n LOGGER.warning(\n \"%s (skip) created subnet in zone %s because '%s'\",\n tag_prefix, zone_name, err)\n if subnet and not subnet['MapPublicIpOnLaunch']:\n subnet_id = subnet['SubnetId']\n if not dry_run:\n resp = ec2_client.modify_subnet_attribute(\n SubnetId=subnet_id,\n MapPublicIpOnLaunch={'Value': True})\n LOGGER.info(\"%s modify web subnet %s so instance can receive\"\\\n \" a public IP by default\", tag_prefix, subnet_id)\n\n LOGGER.info(\"%s provisioning dbs subnets...\", tag_prefix)\n dbs_zones = set([])\n dbs_subnet_by_cidrs = _get_subnet_by_cidrs(\n dbs_subnet_cidrs, tag_prefix, vpc_id=vpc_id, ec2_client=ec2_client)\n for cidr_block, subnet in dbs_subnet_by_cidrs.items():\n if subnet:\n dbs_zones |= {\n (subnet['AvailabilityZoneId'], subnet['AvailabilityZone'])}\n for cidr_block, subnet in dbs_subnet_by_cidrs.items():\n if not subnet:\n available_zones = zones - dbs_zones\n zone_id, zone_name = available_zones.pop()\n resp = ec2_client.create_subnet(\n AvailabilityZoneId=zone_id,\n CidrBlock=cidr_block,\n VpcId=vpc_id,\n TagSpecifications=[{\n 'ResourceType': 'subnet',\n 'Tags': [\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s %s dbs\" % (tag_prefix, zone_name)}]}],\n DryRun=dry_run)\n subnet = resp['Subnet']\n dbs_subnet_by_cidrs[cidr_block] = subnet\n dbs_zones |= set([(zone_id, zone_name)])\n subnet_id = subnet['SubnetId']\n LOGGER.info(\"%s created subnet %s in zone %s for cidr %s\",\n tag_prefix, subnet_id, zone_name, cidr_block)\n if subnet['MapPublicIpOnLaunch']:\n subnet_id = subnet['SubnetId']\n if not dry_run:\n resp = ec2_client.modify_subnet_attribute(\n SubnetId=subnet_id,\n MapPublicIpOnLaunch={'Value': False})\n LOGGER.info(\"%s modify dbs subnet %s so instance do not receive\"\\\n \" a public IP by default\", tag_prefix, subnet_id)\n\n LOGGER.info(\"%s provisioning apps subnets...\", tag_prefix)\n app_zones = set([])\n app_subnet_by_cidrs = _get_subnet_by_cidrs(\n app_subnet_cidrs, tag_prefix, vpc_id=vpc_id, ec2_client=ec2_client)\n for cidr_block, subnet in app_subnet_by_cidrs.items():\n if subnet:\n app_zones |= {\n (subnet['AvailabilityZoneId'], subnet['AvailabilityZone'])}\n for cidr_block, subnet in app_subnet_by_cidrs.items():\n if not subnet:\n available_zones = zones - app_zones\n zone_id, zone_name = available_zones.pop()\n resp = ec2_client.create_subnet(\n AvailabilityZoneId=zone_id,\n CidrBlock=cidr_block,\n VpcId=vpc_id,\n # COMMIT MSG:\n # this requires boto3>=1.14, using `createTag` might fail\n # because the subnet is not fully created yet.\n TagSpecifications=[{\n 'ResourceType': 'subnet',\n 'Tags': [\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s %s app\" % (tag_prefix, 
zone_name)}]}],\n DryRun=dry_run)\n subnet = resp['Subnet']\n app_subnet_by_cidrs[cidr_block] = subnet\n app_zones |= set([(zone_id, zone_name)])\n subnet_id = subnet['SubnetId']\n LOGGER.info(\"%s created subnet %s in %s for cidr %s\",\n tag_prefix, subnet_id, zone_name, cidr_block)\n if subnet['MapPublicIpOnLaunch']:\n subnet_id = subnet['SubnetId']\n if not dry_run:\n resp = ec2_client.modify_subnet_attribute(\n SubnetId=subnet_id,\n MapPublicIpOnLaunch={'Value': False})\n LOGGER.info(\"%s modify app subnet %s so instance do not receive\"\\\n \" a public IP by default\", tag_prefix, subnet_id)\n\n # Ensure that the VPC has an Internet Gateway.\n resp = ec2_client.describe_internet_gateways(\n Filters=[{'Name': 'attachment.vpc-id', 'Values': [vpc_id]}])\n if resp['InternetGateways']:\n igw_id = resp['InternetGateways'][0]['InternetGatewayId']\n LOGGER.info(\"%s found Internet Gateway %s\", tag_prefix, igw_id)\n else:\n resp = ec2_client.describe_internet_gateways(\n Filters=[{'Name': 'tag:Prefix', 'Values': [tag_prefix]}])\n if resp['InternetGateways']:\n igw_id = resp['InternetGateways'][0]['InternetGatewayId']\n LOGGER.info(\"%s found Internet Gateway %s\", tag_prefix, igw_id)\n else:\n resp = ec2_client.create_internet_gateway(DryRun=dry_run)\n igw_id = resp['InternetGateway']['InternetGatewayId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[igw_id],\n Tags=[{'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s internet gateway\" % tag_prefix}])\n LOGGER.info(\"%s created Internet Gateway %s\", tag_prefix, igw_id)\n resp = ec2_client.attach_internet_gateway(\n DryRun=dry_run,\n InternetGatewayId=igw_id,\n VpcId=vpc_id)\n\n # Create the NAT gateway by which private subnets connect to Internet\n # XXX Why do we have a Network interface eni-****?\n nat_elastic_ip = None\n web_elastic_ip = None\n resp = ec2_client.describe_addresses(\n Filters=[{'Name': 'tag:Prefix', 'Values': [tag_prefix]}])\n if resp['Addresses']:\n for resp_address in resp['Addresses']:\n for resp_tag in resp_address['Tags']:\n if resp_tag['Key'] == 'Name':\n if 'NAT gateway' in resp_tag['Value']:\n nat_elastic_ip = resp_address['AllocationId']\n break\n if 'Sally' in resp_tag['Value']:\n web_elastic_ip = resp_address['AllocationId']\n break\n\n if nat_elastic_ip:\n LOGGER.info(\"%s found NAT gateway public IP %s\",\n tag_prefix, nat_elastic_ip)\n else:\n resp = ec2_client.allocate_address(\n DryRun=dry_run,\n Domain='vpc')\n nat_elastic_ip = resp['AllocationId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[nat_elastic_ip],\n Tags=[{'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s NAT gateway public IP\" % tag_prefix}])\n LOGGER.info(\"%s created NAT gateway public IP %s\",\n tag_prefix, nat_elastic_ip)\n if web_elastic_ip:\n LOGGER.info(\"%s found Sally public IP %s\",\n tag_prefix, web_elastic_ip)\n else:\n resp = ec2_client.allocate_address(\n DryRun=dry_run,\n Domain='vpc')\n web_elastic_ip = resp['AllocationId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[web_elastic_ip],\n Tags=[{'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s Sally public IP\" % tag_prefix}])\n LOGGER.info(\"%s created Sally public IP %s\",\n tag_prefix, web_elastic_ip)\n\n # We have 2 EIP addresses. 
They need to be connected to machines\n # running in an Internet facing subnet.\n client_token = tag_prefix\n # XXX shouldn't it be the first web subnet instead?\n resp = ec2_client.describe_nat_gateways(Filters=[\n {'Name': \"vpc-id\", 'Values': [vpc_id]},\n {'Name': \"state\", 'Values': ['pending', 'available']}])\n if resp['NatGateways']:\n if len(resp['NatGateways']) > 1:\n LOGGER.warning(\"%s found more than one NAT gateway.\"\\\n \" Using first one in the list.\", tag_prefix)\n nat_gateway = resp['NatGateways'][0]\n nat_gateway_id = nat_gateway['NatGatewayId']\n nat_gateway_subnet_id = nat_gateway['SubnetId']\n LOGGER.info(\"%s found NAT gateway %s\", tag_prefix, nat_gateway_id)\n else:\n nat_gateway_subnet_id = next(web_subnet_by_cidrs.values())['SubnetId']\n resp = ec2_client.create_nat_gateway(\n AllocationId=nat_elastic_ip,\n ClientToken=client_token,\n SubnetId=nat_gateway_subnet_id)\n nat_gateway_id = resp['NatGateway']['NatGatewayId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[nat_gateway_id],\n Tags=[{'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\",\n 'Value': \"%s NAT gateway\" % tag_prefix}])\n LOGGER.info(\"%s created NAT gateway %s\",\n tag_prefix, nat_gateway_id)\n\n # Set up public and NAT-protected route tables\n resp = ec2_client.describe_route_tables(\n Filters=[{'Name': \"vpc-id\", 'Values': [vpc_id]}])\n public_route_table_id = None\n private_route_table_id = None\n for route_table in resp['RouteTables']:\n for route in route_table['Routes']:\n if 'GatewayId' in route and route['GatewayId'] == igw_id:\n public_route_table_id = route_table['RouteTableId']\n LOGGER.info(\"%s found public route table %s\",\n tag_prefix, public_route_table_id)\n break\n if ('NatGatewayId' in route and\n route['NatGatewayId'] == nat_gateway_id):\n private_route_table_id = route_table['RouteTableId']\n LOGGER.info(\"%s found private route table %s\",\n tag_prefix, private_route_table_id)\n\n if not public_route_table_id:\n resp = ec2_client.create_route_table(\n DryRun=dry_run,\n VpcId=vpc_id)\n public_route_table_id = resp['RouteTable']['RouteTableId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[public_route_table_id],\n Tags=[\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\", 'Value': \"%s public\" % tag_prefix}])\n LOGGER.info(\"%s created public subnet route table %s\",\n tag_prefix, public_route_table_id)\n resp = ec2_client.create_route(\n DryRun=dry_run,\n DestinationCidrBlock='0.0.0.0/0',\n GatewayId=igw_id,\n RouteTableId=public_route_table_id)\n\n if not private_route_table_id:\n resp = ec2_client.create_route_table(\n DryRun=dry_run,\n VpcId=vpc_id)\n private_route_table_id = resp['RouteTable']['RouteTableId']\n ec2_client.create_tags(\n DryRun=dry_run,\n Resources=[private_route_table_id],\n Tags=[\n {'Key': \"Prefix\", 'Value': tag_prefix},\n {'Key': \"Name\", 'Value': \"%s internal\" % tag_prefix}])\n private_route_table_id = resp['RouteTable']['RouteTableId']\n LOGGER.info(\"%s created private route table %s\",\n tag_prefix, private_route_table_id)\n for _ in range(0, NB_RETRIES):\n # The NAT Gateway takes some time to be fully operational.\n try:\n resp = ec2_client.create_route(\n DryRun=dry_run,\n DestinationCidrBlock='0.0.0.0/0',\n NatGatewayId=nat_gateway_id,\n RouteTableId=private_route_table_id)\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidNatGatewayID.NotFound':\n raise\n time.sleep(RETRY_WAIT_DELAY)\n\n resp = 
ec2_client.describe_route_tables(\n DryRun=dry_run,\n RouteTableIds=[public_route_table_id])\n assocs = resp['RouteTables'][0]['Associations']\n if len(assocs) > 1:\n LOGGER.warning(\"%s found more than one route table association for\"\\\n \" public route table. Using first one in the list.\", tag_prefix)\n if not assocs[0]['Main']:\n LOGGER.warning(\"%s public route table is not the main one for the VPC.\",\n tag_prefix)\n\n for cidr_block, subnet in web_subnet_by_cidrs.items():\n if not subnet:\n # Maybe there was a conflict and we skipped this cidr_block.\n continue\n subnet_id = subnet['SubnetId']\n resp = ec2_client.describe_route_tables(\n DryRun=dry_run,\n Filters=[{\n 'Name': 'association.subnet-id',\n 'Values': [subnet_id]\n }])\n # The Main route table does not show as an explicit association.\n found_association = not bool(resp['RouteTables'])\n if found_association:\n LOGGER.info(\n \"%s found public route table %s associated to web subnet %s\",\n tag_prefix, public_route_table_id, subnet_id)\n else:\n resp = ec2_client.associate_route_table(\n DryRun=dry_run,\n RouteTableId=public_route_table_id,\n SubnetId=subnet_id)\n LOGGER.info(\n \"%s associate public route table %s to web subnet %s\",\n tag_prefix, public_route_table_id, subnet_id)\n\n for cidr_block, subnet in dbs_subnet_by_cidrs.items():\n subnet_id = subnet['SubnetId']\n resp = ec2_client.describe_route_tables(\n DryRun=dry_run,\n Filters=[{\n 'Name': 'association.subnet-id',\n 'Values': [subnet_id]\n }])\n # The Main route table does not show as an explicit association.\n found_association = False\n if resp['RouteTables']:\n found_association = (\n resp['RouteTables'][0]['Associations'][0]['RouteTableId'] ==\n private_route_table_id\n )\n if found_association:\n LOGGER.info(\n \"%s found private route table %s associated to dbs subnet %s\",\n tag_prefix, private_route_table_id, subnet_id)\n else:\n resp = ec2_client.associate_route_table(\n DryRun=dry_run,\n RouteTableId=private_route_table_id,\n SubnetId=subnet_id)\n LOGGER.info(\n \"%s associate private route table %s to dbs subnet %s\",\n tag_prefix, private_route_table_id, subnet_id)\n\n for cidr_block, subnet in app_subnet_by_cidrs.items():\n subnet_id = subnet['SubnetId']\n resp = ec2_client.describe_route_tables(\n DryRun=dry_run,\n Filters=[{\n 'Name': 'association.subnet-id',\n 'Values': [subnet_id]\n }])\n # The Main route table does not show as an explicit association.\n found_association = False\n if resp['RouteTables']:\n found_association = (\n resp['RouteTables'][0]['Associations'][0]['RouteTableId'] ==\n private_route_table_id\n )\n if found_association:\n LOGGER.info(\n \"%s found private route table %s associated to app subnet %s\",\n tag_prefix, private_route_table_id, subnet_id)\n else:\n resp = ec2_client.associate_route_table(\n DryRun=dry_run,\n RouteTableId=private_route_table_id,\n SubnetId=subnet_id)\n LOGGER.info(\n \"%s associate private route table %s to app subnet %s\",\n tag_prefix, private_route_table_id, subnet_id)\n\n # Create the ELB, proxies and databases security groups\n # The app security group (as the instance role) will be specific\n # to the application.\n #pylint:disable=unbalanced-tuple-unpacking\n moat_name, vault_name, gate_name, kitchen_door_name = \\\n _get_security_group_names([\n 'moat', 'vault', 'castle-gate', 'kitchen-door'],\n tag_prefix=sg_tag_prefix)\n moat_sg_id, vault_sg_id, gate_sg_id, kitchen_door_sg_id = \\\n _get_security_group_ids(\n [moat_name, vault_name, gate_name, kitchen_door_name],\n tag_prefix, 
vpc_id=vpc_id, ec2_client=ec2_client)\n\n update_moat_rules = (not moat_sg_id)\n update_gate_rules = (not gate_sg_id)\n update_vault_rules = (not vault_sg_id)\n update_kitchen_door_rules = (not kitchen_door_sg_id)\n\n if not moat_sg_id:\n resp = ec2_client.create_security_group(\n Description='%s ELB' % tag_prefix,\n GroupName=moat_name,\n VpcId=vpc_id,\n DryRun=dry_run)\n moat_sg_id = resp['GroupId']\n LOGGER.info(\"%s created %s security group %s\",\n tag_prefix, moat_name, moat_sg_id)\n if not gate_sg_id:\n resp = ec2_client.create_security_group(\n Description='%s session managers' % tag_prefix,\n GroupName=gate_name,\n VpcId=vpc_id,\n DryRun=dry_run)\n gate_sg_id = resp['GroupId']\n LOGGER.info(\"%s created %s security group %s\",\n tag_prefix, gate_name, gate_sg_id)\n if not vault_sg_id:\n resp = ec2_client.create_security_group(\n Description='%s databases' % tag_prefix,\n GroupName=vault_name,\n VpcId=vpc_id,\n DryRun=dry_run)\n vault_sg_id = resp['GroupId']\n LOGGER.info(\"%s created %s security group %s\",\n tag_prefix, vault_name, vault_sg_id)\n # kitchen_door_sg_id: Kitchen door security group is created later on\n # if we have ssh keys.\n\n resp = ec2_client.describe_security_groups(\n DryRun=dry_run,\n GroupIds=[moat_sg_id, vault_sg_id, gate_sg_id])\n for security_group in resp['SecurityGroups']:\n if security_group['GroupId'] == moat_sg_id:\n # moat rules\n LOGGER.info(\"%s check ingress rules for %s\", tag_prefix, moat_name)\n check_security_group_ingress(security_group, expected_rules=[\n {'port': 80, 'source': '0.0.0.0/0'},\n {'port': 80, 'source': '::/0'},\n {'port': 443, 'source': '0.0.0.0/0'},\n {'port': 443, 'source': '::/0'},\n ],\n tag_prefix=tag_prefix)\n elif security_group['GroupId'] == gate_sg_id:\n # castle-gate rules\n LOGGER.info(\"%s check ingress rules for %s\", tag_prefix, gate_name)\n check_security_group_ingress(security_group, expected_rules=[\n {'port': 80, 'source': moat_sg_id},\n {'port': 443, 'source': moat_sg_id}\n ],\n tag_prefix=tag_prefix)\n elif security_group['GroupId'] == vault_sg_id:\n # vault rules\n LOGGER.info(\"%s check ingress rules for %s\", tag_prefix, vault_name)\n check_security_group_ingress(security_group, expected_rules=[\n {'port': 5432, 'source': gate_sg_id}\n ],\n tag_prefix=tag_prefix)\n\n # moat allow rules\n if update_moat_rules:\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=moat_sg_id,\n IpPermissions=[{\n 'FromPort': 80,\n 'IpProtocol': 'tcp',\n 'IpRanges': [{\n 'CidrIp': '0.0.0.0/0'\n }],\n 'ToPort': 80\n }, {\n 'FromPort': 80,\n 'IpProtocol': 'tcp',\n 'Ipv6Ranges': [{\n 'CidrIpv6': '::/0',\n }],\n 'ToPort': 80\n }, {\n 'FromPort': 443,\n 'IpProtocol': 'tcp',\n 'IpRanges': [{\n 'CidrIp': '0.0.0.0/0'\n }],\n 'ToPort': 443\n }, {\n 'FromPort': 443,\n 'IpProtocol': 'tcp',\n 'Ipv6Ranges': [{\n 'CidrIpv6': '::/0',\n }],\n 'ToPort': 443\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n\n if update_gate_rules:\n # castle-gate allow rules\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=gate_sg_id,\n IpPermissions=[{\n 'IpProtocol': 'tcp',\n 'FromPort': 80,\n 'ToPort': 80,\n 'UserIdGroupPairs': [{'GroupId': moat_sg_id}]\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = 
ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=gate_sg_id,\n IpPermissions=[{\n 'IpProtocol': 'tcp',\n 'FromPort': 443,\n 'ToPort': 443,\n 'UserIdGroupPairs': [{'GroupId': moat_sg_id}]\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = ec2_client.authorize_security_group_egress(\n DryRun=dry_run,\n GroupId=gate_sg_id,\n IpPermissions=[{\n 'IpProtocol': '-1',\n 'IpRanges': [{\n 'CidrIp': '0.0.0.0/0',\n }]}])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n # vault allow rules\n if update_vault_rules:\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=vault_sg_id,\n IpPermissions=[{\n 'IpProtocol': 'tcp',\n 'FromPort': 5432,\n 'ToPort': 5432,\n 'UserIdGroupPairs': [{'GroupId': gate_sg_id}]\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = ec2_client.authorize_security_group_egress(\n DryRun=dry_run,\n GroupId=vault_sg_id,\n IpPermissions=[{\n 'IpProtocol': '-1',\n 'IpRanges': [{\n 'CidrIp': '0.0.0.0/0',\n }]}])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n\n # Create uploads and logs S3 buckets\n # XXX create the identities bucket?\n # XXX need to force private.\n if not s3_identities_bucket:\n s3_identities_bucket = '%s-identities' % tag_prefix\n s3_uploads_bucket = tag_prefix\n s3_client = boto3.client('s3')\n if s3_logs_bucket:\n try:\n resp = s3_client.create_bucket(\n ACL='private',\n Bucket=s3_logs_bucket,\n CreateBucketConfiguration={\n 'LocationConstraint': region_name\n })\n LOGGER.info(\"%s created S3 bucket for logs %s\",\n tag_prefix, s3_logs_bucket)\n except botocore.exceptions.ClientError as err:\n LOGGER.info(\"%s found S3 bucket for logs %s\",\n tag_prefix, s3_logs_bucket)\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'BucketAlreadyOwnedByYou':\n raise\n # Apply bucket encryption by default\n found_encryption = False\n try:\n resp = s3_client.get_bucket_encryption(\n Bucket=s3_logs_bucket)\n if resp['ServerSideEncryptionConfiguration']['Rules'][0][\n 'ApplyServerSideEncryptionByDefault'][\n 'SSEAlgorithm'] == 'AES256':\n found_encryption = True\n LOGGER.info(\"%s found encryption AES256 enabled on %s bucket\",\n tag_prefix, s3_logs_bucket)\n except botocore.exceptions.ClientError as err:\n LOGGER.info(\"%s found S3 bucket for logs %s\",\n tag_prefix, s3_logs_bucket)\n if not err.response.get('Error', {}).get('Code', 'Unknown') == \\\n 'ServerSideEncryptionConfigurationNotFoundError':\n raise\n if not found_encryption:\n s3_client.put_bucket_encryption(\n Bucket=s3_logs_bucket,\n ServerSideEncryptionConfiguration={\n 'Rules': [{\n 'ApplyServerSideEncryptionByDefault': {\n 'SSEAlgorithm': 'AES256',\n }\n }]\n })\n LOGGER.info(\"%s enable encryption on %s bucket\",\n tag_prefix, s3_logs_bucket)\n\n # Set versioning and lifecycle policies\n resp = s3_client.get_bucket_versioning(\n Bucket=s3_logs_bucket)\n if 'Status' in resp and resp['Status'] == 'Enabled':\n LOGGER.info(\"%s found versioning enabled on %s bucket\",\n tag_prefix, s3_logs_bucket)\n else:\n s3_client.put_bucket_versioning(\n Bucket=s3_logs_bucket,\n 
VersioningConfiguration={\n 'MFADelete': 'Disabled',\n 'Status': 'Enabled'\n })\n LOGGER.info(\"%s enable versioning on %s bucket\",\n tag_prefix, s3_logs_bucket)\n found_policy = False\n #pylint:disable=too-many-nested-blocks\n try:\n resp = s3_client.get_bucket_lifecycle_configuration(\n Bucket=s3_logs_bucket)\n for rule in resp['Rules']:\n if rule['Status'] == 'Enabled':\n found_rule = True\n for transition in rule['Transitions']:\n if transition['StorageClass'] == 'GLACIER':\n if transition.get('Days', 0) < 90:\n found_rule = False\n LOGGER.warning(\"%s lifecycle for 'GLACIER'\"\\\n \" is less than 90 days.\", tag_prefix)\n break\n if rule['Expiration'].get('Days', 0) < 365:\n found_rule = False\n LOGGER.warning(\n \"%s lifecycle expiration is less than 365 days.\",\n tag_prefix)\n for transition in rule['NoncurrentVersionTransitions']:\n if transition['StorageClass'] == 'GLACIER':\n if transition.get('NoncurrentDays', 0) < 90:\n found_rule = False\n LOGGER.warning(\n \"%s version lifecycle for 'GLACIER'\"\\\n \" is less than 90 days.\", tag_prefix)\n break\n if rule['NoncurrentVersionExpiration'].get(\n 'NoncurrentDays', 0) < 365:\n found_rule = False\n LOGGER.warning(\"%s lifecycle version expiration is\"\\\n \" less than 365 days.\", tag_prefix)\n if found_rule:\n found_policy = True\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'NoSuchLifecycleConfiguration':\n raise\n if found_policy:\n LOGGER.info(\"%s found lifecycle policy on %s bucket\",\n tag_prefix, s3_logs_bucket)\n else:\n s3_client.put_bucket_lifecycle_configuration(\n Bucket=s3_logs_bucket,\n LifecycleConfiguration={\n \"Rules\": [{\n \"Status\": \"Enabled\",\n \"ID\": \"expire-logs\",\n \"Filter\": {\n \"Prefix\": \"\", # This is required.\n },\n \"Transitions\": [{\n \"Days\": 90,\n \"StorageClass\": \"GLACIER\"\n }],\n \"Expiration\" : {\n \"Days\": 365\n },\n \"NoncurrentVersionTransitions\": [{\n \"NoncurrentDays\": 90,\n \"StorageClass\": \"GLACIER\"\n }],\n 'NoncurrentVersionExpiration': {\n 'NoncurrentDays': 365\n },\n }]})\n LOGGER.info(\"%s update lifecycle policy on %s bucket\",\n tag_prefix, s3_logs_bucket)\n\n # https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-access-logs.html#attach-bucket-policy\n elb_account_ids_per_region = {\n 'us-east-1': '127311923021',\n 'us-east-2': '033677994240',\n 'us-west-1': '027434742980',\n 'us-west-2': '797873946194',\n 'af-south-1': '098369216593',\n 'ca-central-1': '985666609251',\n 'eu-central-1': '054676820928',\n 'eu-west-1': '156460612806',\n 'eu-west-2': '652711504416',\n 'eu-south-1': '635631232127',\n 'eu-west-3': '009996457667',\n 'eu-north-1': '897822967062',\n 'ap-east-1': '754344448648',\n 'ap-northeast-1': '582318560864',\n 'ap-northeast-2': '600734575887',\n 'ap-northeast-3': '383597477331',\n 'ap-southeast-1': '114774131450',\n 'ap-southeast-2': '783225319266',\n 'ap-south-1': '718504428378',\n 'me-south-1': '076674570225',\n 'sa-east-1': '507241528517'\n }\n elb_account_id = elb_account_ids_per_region[region_name]\n s3_client.put_bucket_policy(\n Bucket=s3_logs_bucket,\n Policy=json.dumps({\n \"Version\": \"2008-10-17\",\n \"Id\": \"WriteLogs\",\n \"Statement\": [{\n # billing reports\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"billingreports.amazonaws.com\"\n },\n \"Action\": [\n \"s3:GetBucketAcl\",\n \"s3:GetBucketPolicy\"\n ],\n \"Resource\": \"arn:aws:s3:::%s\" % s3_logs_bucket\n }, {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": 
\"billingreports.amazonaws.com\"\n },\n \"Action\": \"s3:PutObject\",\n \"Resource\": \"arn:aws:s3:::%s/*\" % s3_logs_bucket\n }, {\n # ELB access logs\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"AWS\": \"arn:aws:iam::%s:root\" % elb_account_id\n },\n \"Action\": \"s3:PutObject\",\n \"Resource\":\n \"arn:aws:s3:::%s/var/log/elb/*\" % s3_logs_bucket\n }, {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"delivery.logs.amazonaws.com\"\n },\n \"Action\": \"s3:PutObject\",\n \"Resource\":\n (\"arn:aws:s3:::%s/var/log/elb/*\" % s3_logs_bucket),\n \"Condition\": {\n \"StringEquals\": {\n \"s3:x-amz-acl\": \"bucket-owner-full-control\"\n }\n }\n }, {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"delivery.logs.amazonaws.com\"\n },\n \"Action\": \"s3:GetBucketAcl\",\n \"Resource\": \"arn:aws:s3:::%s\" % s3_logs_bucket\n }]\n }))\n\n if s3_uploads_bucket:\n try:\n resp = s3_client.create_bucket(\n ACL='private',\n Bucket=s3_uploads_bucket,\n CreateBucketConfiguration={\n 'LocationConstraint': region_name\n })\n LOGGER.info(\"%s created S3 bucket for uploads %s\",\n tag_prefix, s3_uploads_bucket)\n except botocore.exceptions.ClientError as err:\n LOGGER.info(\"%s found S3 bucket for uploads %s\",\n tag_prefix, s3_uploads_bucket)\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'BucketAlreadyOwnedByYou':\n raise\n\n # Create instance profiles ...\n iam_client = boto3.client('iam')\n # ... for webfront instances\n create_instance_profile(\n create_gate_role(gate_name,\n s3_logs_bucket=s3_logs_bucket, s3_uploads_bucket=s3_uploads_bucket,\n iam_client=iam_client, tag_prefix=tag_prefix),\n iam_client=iam_client, region_name=region_name,\n tag_prefix=tag_prefix, dry_run=dry_run)\n # ... for databases instances\n create_instance_profile(\n create_vault_role(vault_name,\n s3_logs_bucket=s3_logs_bucket, s3_uploads_bucket=s3_uploads_bucket,\n iam_client=iam_client, tag_prefix=tag_prefix),\n iam_client=iam_client, region_name=region_name,\n tag_prefix=tag_prefix, dry_run=dry_run)\n\n if ssh_key_name:\n if not ssh_key_content:\n ssh_key_path = os.path.join(os.getenv('HOME'),\n '.ssh', '%s.pub' % ssh_key_name)\n if os.path.exists(ssh_key_path):\n with open(ssh_key_path, 'rb') as ssh_key_obj:\n ssh_key_content = ssh_key_obj.read()\n else:\n LOGGER.warning(\"%s no content for SSH key %s\",\n tag_prefix, ssh_key_name)\n # import SSH keys\n try:\n resp = ec2_client.import_key_pair(\n DryRun=dry_run,\n KeyName=ssh_key_name,\n PublicKeyMaterial=ssh_key_content)\n LOGGER.info(\"%s imported SSH key %s\", tag_prefix, ssh_key_name)\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidKeyPair.Duplicate':\n raise\n LOGGER.info(\"%s found SSH key %s\", tag_prefix, ssh_key_name)\n\n # ... 
for sally instances\n create_instance_profile(\n create_logs_role(kitchen_door_name,\n s3_logs_bucket=s3_logs_bucket,\n iam_client=iam_client, tag_prefix=tag_prefix),\n iam_client=iam_client, region_name=region_name,\n tag_prefix=tag_prefix, dry_run=dry_run)\n\n # allows SSH connection to instances for debugging\n update_kitchen_door_rules = (not kitchen_door_sg_id)\n if not kitchen_door_sg_id:\n resp = ec2_client.create_security_group(\n Description='%s SSH access' % tag_prefix,\n GroupName=kitchen_door_name,\n VpcId=vpc_id,\n DryRun=dry_run)\n kitchen_door_sg_id = resp['GroupId']\n LOGGER.info(\"%s created %s security group %s\",\n tag_prefix, kitchen_door_name, kitchen_door_sg_id)\n\n if update_kitchen_door_rules:\n try:\n if sally_ip:\n cidr_block = '%s/32' % sally_ip\n else:\n LOGGER.warning(\"no IP range was specified to restrict\"\\\n \" access to SSH port\")\n cidr_block = '0.0.0.0/0'\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=kitchen_door_sg_id,\n CidrIp=cidr_block,\n IpProtocol='tcp',\n FromPort=22,\n ToPort=22)\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = ec2_client.authorize_security_group_egress(\n DryRun=dry_run,\n GroupId=kitchen_door_sg_id,\n IpPermissions=[{\n 'IpProtocol': '-1',\n 'IpRanges': [{\n 'CidrIp': '0.0.0.0/0',\n }]}])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=gate_sg_id,\n IpPermissions=[{\n 'IpProtocol': 'tcp',\n 'FromPort': 22,\n 'ToPort': 22,\n 'UserIdGroupPairs': [{'GroupId': kitchen_door_sg_id}]\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n try:\n resp = ec2_client.authorize_security_group_ingress(\n DryRun=dry_run,\n GroupId=vault_sg_id,\n IpPermissions=[{\n 'IpProtocol': 'tcp',\n 'FromPort': 22,\n 'ToPort': 22,\n 'UserIdGroupPairs': [{'GroupId': kitchen_door_sg_id}]\n }])\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidPermission.Duplicate':\n raise\n\n # Creates encryption keys (KMS) in region\n if not storage_enckey:\n storage_enckey = _get_or_create_storage_enckey(\n region_name, tag_prefix, dry_run=dry_run)\n\n # Create an Application ELB and WAF\n load_balancer_arn = create_elb(\n tag_prefix, web_subnet_by_cidrs, moat_sg_id,\n s3_logs_bucket=s3_logs_bucket,\n tls_priv_key=tls_priv_key, tls_fullchain_cert=tls_fullchain_cert,\n region_name=region_name)\n create_waf(\n tag_prefix,\n elb_arn=load_balancer_arn,\n s3_logs_bucket=s3_logs_bucket,\n region_name=region_name,\n dry_run=dry_run)", "def test_replace_cluster_network(self):\n pass", "def fusion_api_delete_ethernet_network(self, name=None, uri=None, param='', api=None, headers=None):\n return self.ethernet_network.delete(name=name, uri=uri, param=param, api=api, headers=headers)", "def test_routing_ip_prefix_uninstall(self):\n self._common_uninstall_external_and_unintialized(\n 'some_id', routing_ip_prefix.delete,\n {'prefix': {}}\n )", "def test_ipam_ip_addresses_delete(self):\n pass", "def network_create_end(self, payload):\n network_id = payload['network']['id']\n self.enable_dhcp_helper(network_id)", "def mac_pool_remove(handle, name, 
parent_dn=\"org-root\"):\r\n dn = parent_dn + '/mac-pool-' + name\r\n mo = handle.query_dn(dn)\r\n if mo:\r\n handle.remove_mo(mo)\r\n handle.commit()\r\n else:\r\n raise ValueError(\"MAC Pool is not available\")", "def test_networking_project_network_delete(self):\n pass", "def release_floating_ip(self, context, address,\n affect_auto_assigned=False):\n floating_ip = self.db.floating_ip_get_by_address(context, address)\n if floating_ip['fixed_ip']:\n raise exception.ApiError(_('Floating ip is in use. '\n 'Disassociate it before releasing.'))\n if not affect_auto_assigned and floating_ip.get('auto_assigned'):\n return\n # NOTE(vish): We don't know which network host should get the ip\n # when we deallocate, so just send it to any one. This\n # will probably need to move into a network supervisor\n # at some point.\n rpc.cast(context,\n FLAGS.network_topic,\n {'method': 'deallocate_floating_ip',\n 'args': {'floating_address': floating_ip['address']}})", "def release(self, floating_ip_id):\n self.client.delete_floatingip(floating_ip_id)", "def test_delete_cluster_network(self):\n pass", "def network_delete_end(self, payload):\n self.disable_dhcp_helper(payload['network_id'])", "def delete_endpoint(EndpointName=None):\n pass", "def post_floating_ip_delete(self, resource_id, resource_dict):\n pass", "def testPutNetworkLocalIp(self):\n models.System.objects.all().delete()\n self._saveSystem()\n old_count = models.Network.objects.count()\n self._put('inventory/networks/1/',\n data=testsxml.network_put_xml_opt_ip_addr % \"169.254.4.4\",\n username=\"admin\", password=\"password\")\n self.assertEquals(old_count, models.Network.objects.count())\n\n self._put('inventory/networks/1/',\n data=testsxml.network_put_xml_opt_ip_addr % \"4.4.4.4\",\n username=\"admin\", password=\"password\")\n self.assertEquals(old_count + 1, models.Network.objects.count())", "def create_public_ip(self):\n raise NotImplementedError", "def delete_network(self, network):\r\n return self.delete(self.network_path % (network))", "def post_subnet_create(self, resource_dict):\n pass", "def destroy(self, network, device_name):\n if self.conf.use_namespaces:\n namespace = NS_PREFIX + network.id\n else:\n namespace = None\n\n self.driver.unplug(device_name, namespace=namespace)\n\n self.plugin.release_dhcp_port(network.id,\n self.get_device_id(network))", "def post_network_ipam_update(self, resource_id, resource_dict):\n pass", "def run(self, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.delete_network(network[\"id\"])", "def subnet_create(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.create_subnet(**kwargs)", "def network_update_end(self, payload):\n network_id = payload['network']['id']\n if payload['network']['admin_state_up']:\n self.enable_dhcp_helper(network_id)\n else:\n self.disable_dhcp_helper(network_id)", "def _disassociate_floating_ip(self, context, address, interface,\n instance_uuid):\n interface = CONF.public_interface or interface\n\n @utils.synchronized(six.text_type(address))\n def do_disassociate():\n # NOTE(vish): Note that we are disassociating in the db before we\n # actually remove the ip address on the host. We are\n # safe from races on this host due to the decorator,\n # but another host might grab the ip right away. 
We\n # don't worry about this case because the minuscule\n # window where the ip is on both hosts shouldn't cause\n # any problems.\n floating = objects.FloatingIP.disassociate(context, address)\n fixed = floating.fixed_ip\n if not fixed:\n # NOTE(vish): ip was already disassociated\n return\n if interface:\n # go go driver time\n self.l3driver.remove_floating_ip(address, fixed.address,\n interface, fixed.network)\n payload = dict(project_id=context.project_id,\n instance_id=instance_uuid,\n floating_ip=address)\n self.notifier.info(context,\n 'network.floating_ip.disassociate', payload)\n do_disassociate()", "def test_networking_project_network_tag_delete(self):\n pass", "def test_replace_namespaced_egress_network_policy(self):\n pass", "def delete_network(options, vsm_obj):\n print(\"Disconnecting edge interface attached to this network\")\n edge_id = get_edge(vsm_obj)\n edge = Edge(vsm_obj, '4.0')\n edge.id = edge_id\n vnics = Vnics(edge)\n vnics_schema = vnics.query()\n network = get_network_id(options, get_network_name_on_vc(options))\n for vnic in vnics_schema.vnics:\n if network and vnic.portgroupId == network:\n print(\"Found a matching vnic %s %s\" % (options.name, vnic.index))\n vnic.isConnected = \"False\"\n vnic.portgroupId = None\n vnic.name = \"vnic%s\" % vnic.index\n vnics_schema = VnicsSchema()\n vnics_schema.vnics = [vnic]\n result = vnics.create(vnics_schema)\n if (result[0].response.status != 204):\n print \"update vnic error: %s %s\" \\\n % (result[0].response.status, result[0].response.reason)\n return False\n else:\n break\n else:\n print (\"No matching vnic found\")\n\n vdn_scope = get_transport_zone(options)\n virtual_wire = VirtualWire(vdn_scope)\n vwire = virtual_wire.read_by_name(get_network_name(options))\n name = get_network_name(options)\n if vwire != \"FAILURE\":\n print(\"Found a matching network %s\" % (options.name))\n virtual_wire.id = vwire.objectId\n result = virtual_wire.delete()\n if (result.response.status != 200):\n print (\"Delete vwire error: %s\" % result.response.reason)\n return False\n else:\n print (\"No matching network found\")\n print(\"Network %s deleted\" % (options.name))\n\n return True", "def rm_network(c):\n print('Stopping local test network and removing containers')\n with c.cd('images'):\n c.run('sudo docker-compose down -v', hide='stderr')\n\n c.run('sudo rm -rf volumes/stellar-core/opt/stellar-core/buckets')\n c.run('sudo rm -f volumes/stellar-core/opt/stellar-core/*.log')\n c.run('sudo rm -rf volumes/stellar-core/tmp')", "def unplug_port_from_network(self, device_id, device_owner, hostname,\n port_id, network_id, tenant_id, sg, vnic_type,\n switch_bindings=None, segments=None):", "def launch(\n *,\n key_name: Optional[str],\n instance_type: str,\n ami: str,\n ami_user: str,\n tags: Dict[str, str],\n display_name: Optional[str] = None,\n size_gb: int,\n security_group_name: str,\n instance_profile: Optional[str],\n nonce: str,\n delete_after: datetime.datetime,\n) -> Instance:\n\n if display_name:\n tags[\"Name\"] = display_name\n tags[\"scratch-delete-after\"] = str(delete_after.timestamp())\n tags[\"nonce\"] = nonce\n tags[\"git_ref\"] = git.describe()\n tags[\"ami-user\"] = ami_user\n\n ec2 = boto3.client(\"ec2\")\n groups = ec2.describe_security_groups()\n security_group_id = None\n for group in groups[\"SecurityGroups\"]:\n if group[\"GroupName\"] == security_group_name:\n security_group_id = group[\"GroupId\"]\n break\n\n if security_group_id is None:\n vpcs = ec2.describe_vpcs()\n vpc_id = None\n for vpc in 
vpcs[\"Vpcs\"]:\n if vpc[\"IsDefault\"] == True:\n vpc_id = vpc[\"VpcId\"]\n break\n if vpc_id is None:\n default_vpc = ec2.create_default_vpc()\n vpc_id = default_vpc[\"Vpc\"][\"VpcId\"]\n securitygroup = ec2.create_security_group(\n GroupName=security_group_name,\n Description=\"Allows all.\",\n VpcId=vpc_id,\n )\n security_group_id = securitygroup[\"GroupId\"]\n ec2.authorize_security_group_ingress(\n GroupId=security_group_id,\n CidrIp=\"0.0.0.0/0\",\n IpProtocol=\"tcp\",\n FromPort=22,\n ToPort=22,\n )\n\n network_interface: InstanceNetworkInterfaceSpecificationTypeDef = {\n \"AssociatePublicIpAddress\": True,\n \"DeviceIndex\": 0,\n \"Groups\": [security_group_id],\n }\n\n say(f\"launching instance {display_name or '(unnamed)'}\")\n with open(ROOT / \"misc\" / \"scratch\" / \"provision.bash\") as f:\n provisioning_script = f.read()\n kwargs: RunInstancesRequestRequestTypeDef = {\n \"MinCount\": 1,\n \"MaxCount\": 1,\n \"ImageId\": ami,\n \"InstanceType\": cast(InstanceTypeType, instance_type),\n \"UserData\": provisioning_script,\n \"TagSpecifications\": [\n {\n \"ResourceType\": \"instance\",\n \"Tags\": [{\"Key\": k, \"Value\": v} for (k, v) in tags.items()],\n }\n ],\n \"NetworkInterfaces\": [network_interface],\n \"BlockDeviceMappings\": [\n {\n \"DeviceName\": \"/dev/sda1\",\n \"Ebs\": {\n \"VolumeSize\": size_gb,\n \"VolumeType\": \"gp3\",\n },\n }\n ],\n \"MetadataOptions\": {\n # Allow Docker containers to access IMDSv2.\n \"HttpPutResponseHopLimit\": 2,\n },\n }\n if key_name:\n kwargs[\"KeyName\"] = key_name\n if instance_profile:\n kwargs[\"IamInstanceProfile\"] = {\"Name\": instance_profile}\n i = boto3.resource(\"ec2\").create_instances(**kwargs)[0]\n\n return i", "def setNetGroup(addr): #status: Done, not tested\r\n pass", "def test_delete_collection_namespaced_egress_network_policy(self):\n pass", "def net_undefine(network, server, virt=\"Xen\"):\n\n cmd = \"virsh -c %s net-undefine %s 2>/dev/null\" % (virt2uri(virt), network)\n ret, out = run_remote(server, cmd)\n\n return ret", "def test_create_network_and_subnet(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 254\n self.__create_network_and_subnet_test_helper__(network_name, network_cidr)", "def network(self):\n address = unicode(\"%s/%s\" % (self.address, _get_cidr(self.netmask)))\n return IPv4Network(address, strict=False)", "def test_delete_namespaced_egress_network_policy(self):\n pass", "def post_virtual_network_update(self, resource_id, resource_dict):\n pass", "def test_delete_network(self):\n pass", "def unlink(address):", "def proxy(self):\n result = self.instances(role='stateless-body', format=\"PrivateIpAddress\")\n return result[0][0] if result else None", "def delete_network_profile(arn=None):\n pass", "def test_networking_project_network_update(self):\n pass", "def post_virtual_network_create(self, resource_dict):\n pass", "def post_subnet_update(self, resource_id, resource_dict):\n pass" ]
[ "0.60266286", "0.59202003", "0.55559486", "0.55126613", "0.54336125", "0.5428855", "0.5325266", "0.52842504", "0.52760464", "0.51872087", "0.51832", "0.5157842", "0.5133802", "0.51154464", "0.50945", "0.5071877", "0.50229526", "0.49781683", "0.49698222", "0.49608693", "0.49414656", "0.4876385", "0.4876109", "0.48506483", "0.48391956", "0.483107", "0.48198774", "0.48144412", "0.47877264", "0.47826856", "0.47646627", "0.47553644", "0.4750884", "0.4749559", "0.47416437", "0.47397834", "0.4732694", "0.47132242", "0.46988556", "0.46973038", "0.46942642", "0.46913528", "0.46887413", "0.46875468", "0.46847612", "0.4670847", "0.4657646", "0.46575457", "0.46482673", "0.4631355", "0.462636", "0.4621056", "0.4620248", "0.46131995", "0.4612998", "0.46098763", "0.46023968", "0.45993194", "0.4590164", "0.45898372", "0.45866376", "0.45848858", "0.45848703", "0.4582764", "0.45790833", "0.45782888", "0.45677748", "0.45659846", "0.45580745", "0.45504415", "0.45486787", "0.45483294", "0.45476377", "0.45462418", "0.45461383", "0.45428243", "0.4537107", "0.45248502", "0.45184824", "0.45176128", "0.4516666", "0.4515902", "0.4514629", "0.45142433", "0.4511305", "0.4507707", "0.4507354", "0.44984463", "0.44944513", "0.44903988", "0.4489874", "0.44850954", "0.44808105", "0.44801226", "0.44762012", "0.44726735", "0.44724828", "0.44715852", "0.44676235", "0.44675547", "0.44673678" ]
0.0
-1
The instance must be in the running state when you call this operation. This operation is applicable only to general-purpose local-disk and dedicated local-disk instances. You can call this operation up to 30 times per minute. To call this operation at a higher frequency, use a Logstore. For more information, see [Manage a Logstore](~~48990~~).
def modify_audit_log_filter_with_options( self, request: dds_20151201_models.ModifyAuditLogFilterRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.ModifyAuditLogFilterResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.filter): query['Filter'] = request.filter if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.role_type): query['RoleType'] = request.role_type if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='ModifyAuditLogFilter', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.ModifyAuditLogFilterResponse(), self.call_api(params, req, runtime) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def monitorStore():\n # commented to use psutil system info system_info = systeminfo.get_all_info()\n\n system_info = nodeinfo.node_all()\n system_info ['monitored_timestamp'] = config.get_current_system_timestamp()\n\n # Attach sliver info to system info\n system_info.update(sliverinfo.collectAllDataAPI())\n\n s = shelve.open('log_shelf.db', writeback = True)\n\n while(1):\n try:\n try:\n if s.has_key('current_seq_number'):\n #Update current sequence number\n s['current_seq_number']+= 1\n current_seq = s['current_seq_number']\n else:\n current_seq = 1\n s['current_seq_number']= current_seq\n\n print(\"writing to file: \" + str(current_seq))\n\n # print(\"writing to file\" + str(system_info))\n s[str(current_seq)]= system_info\n\n\n finally:\n s.close()\n break\n\n except OSError:\n # In some research devices, the underlying dbm has a bug which needs to be handled explicitly\n print(\"Exception caught while handling shelve file!! OS Error: file not found. Trying again in 1 second\")\n time.sleep(1)\n continue\n\n delete_seen_entries()", "def log(msg):\n\n print('datastore: %s' % msg)", "def detail_running_instance(self):\n\n instance_id = self._choose_among_running_instances()\n\n # Exit option\n if not instance_id:\n print 'Operation cancelled'\n return\n\n # Print the details\n print '# Details of the \"%s\" instance' % instance_id\n self.compute.detail_running_instance(instance_id)", "def on_start(self):\r\n self.log()", "def log_runtime(label, mean_time, std, instances):\n pass", "def on_sync(self):\r\n self.log()", "def snapshot(self, instance, name):\n # TODO(imsplitbit): Need to implement vzdump\n pass", "def internal_event(self):\n # log activity\n self.log_activity(LogEntry(\n sys_time=time(),\n logical_time=self.logical_clock,\n action=\"work\"\n ))", "def store_stream(data: DataStream):\n if data:\n try:\n c1 = datetime.now()\n CC.save_datastream(data,\"datastream\")\n e1 = datetime.now()\n CC.save_datastream_to_influxdb(data)\n i1 = datetime.now()\n print(\"Cassandra Time: \", e1-c1, \" Influx Time: \",i1-e1, \" Batch size: \",len(data.data))\n except:\n cc_log()", "def doBackup(self):\n self.logger.log(\"Begin to backup instance status...\")\n \n try:\n self.readConfigInfo()\n self.getUserInfo()\n \n # dump status to file\n cmd = ClusterCommand.getQueryStatusCmd(self.user, self.dbNodeInfo.id, self.__bakStatusFile)\n (status, output) = commands.getstatusoutput(cmd)\n if (status != 0):\n self.logger.logExit(\"Query local instance status failed!Error: %s\" % output)\n except Exception, e:\n self.logger.logExit(str(e))\n \n self.logger.log(\"Backup instance status successfully.\")\n self.logger.closeLog()", "def monitor(self):", "def execute(self):\n\n c = self.config\n regions = dict((x.name, x) for x in boto.ec2.regions(\n aws_access_key_id=c['access_key'],\n aws_secret_access_key=c['secret_access_key']))\n connect = regions[c['region']].connect(\n aws_access_key_id=c['access_key'],\n aws_secret_access_key=c['secret_access_key'])\n volume = connect.get_all_volumes([c['volume_id']])[0]\n volume.create_snapshot(c['volume_id'])\n snapshots = {}\n for x in connect.get_all_snapshots():\n if x.volume_id == c['volume_id']:\n snapshots.update({x.id: x.start_time})\n snapshots = sorted(snapshots.items(), key=lambda (k, v): (v, k), reverse=True)\n for i in range(int(c['keep']), len(snapshots)):\n connect.delete_snapshot(snapshots[i][0])", "def logmem(cls, tag):\n\n cls.logger.info(\"----%s----\", str(tag))\n mem = psutil.virtual_memory()\n cls.logger.info(\"total:%s M\", mem.total / 
1024 / 1024)\n cls.logger.info(\"available:%s M\", mem.available / 1024 / 1024)\n cls.logger.info(\"used:%s M\", mem.used / 1024 / 1024)\n cls.logger.info(\"free:%s M\", mem.free / 1024 / 1024)\n cls.logger.info(\"active:%s M\", mem.active / 1024 / 1024)\n cls.logger.info(\"inactive:%s M\", mem.inactive / 1024 / 1024)\n cls.logger.info(\"buffers:%s M\", mem.buffers / 1024 / 1024)\n cls.logger.info(\"cached:%s M\", mem.cached / 1024 / 1024)\n cls.logger.info(\"shared:%s M\", mem.shared / 1024 / 1024)", "def run_instance():\n ami_id = \"ami-04876f29fd3a5e8ba\" # AMI Id\n instance_type = \"t2.micro\" # Instance Type\n tag_specs = [\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': 'Name',\n 'Value': 'BoB10@ProductDev.sdk.8084'\n }\n ]\n }\n ]\n\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Run an instance\n\n instances = ec2_resource.create_instances(ImageId=ami_id, InstanceType=instance_type,\n MaxCount=1, MinCount=1, KeyName='BoB10@ProductDev.8084',\n TagSpecifications=tag_specs, \n SecurityGroupIds=['sg-06b757b4bb272d98f'],\n UserData=assemble_userdata().as_string())\n Instance_id = instances[0].id\n print('\\nInstance Id: ' + Instance_id)\n print('Image Id: ' + instances[0].image_id)\n print('Instance Type: ' + instances[0].instance_type)\n print('State: ' + instances[0].state['Name'])\n return Instance_id", "def main(logger):\n logger.info('Snapshot Reaper starting')\n keep_running = True\n while keep_running:\n logger.info(\"Connecting to vCenter {} as {}\".format(const.INF_VCENTER_SERVER, const.INF_VCENTER_USER))\n with vCenter(host=const.INF_VCENTER_SERVER, user=const.INF_VCENTER_USER,\n password=const.INF_VCENTER_PASSWORD) as vcenter:\n try:\n start_loop = time.time()\n reap_snapshots(vcenter, logger)\n except Exception as doh:\n logger.exception(doh)\n keep_running = False\n else:\n ran_for = int(time.time() - start_loop)\n logger.debug('Took {} seconds to check all snapshots'.format(ran_for))\n loop_delta = LOOP_INTERVAL - ran_for\n sleep_for = max(0, loop_delta)\n time.sleep(sleep_for)", "def run(self):\n while True :\n try:\n instance_id = os.getenv(\"CF_INSTANCE_INDEX\")\n mydict = db.hgetall(application_name)\n if instance_id not in mydict :\n self.queue.put(instance_id)\n except :\n pass\n finally:\n pass", "def running(self):\n pass", "def post_run_hook(self, instance, status):\n instance.log_lifecycle(\"post_run\")", "def _create_vm(self):\n self._create_instance_in_the_db()\n self.type_data = db.instance_type_get_by_name(None, 'm1.large')\n self.conn.spawn(self.context, self.instance, self.network_info)\n self._check_vm_record()", "def log_model_without_starting_new_run():\n with TempDir() as tmp:\n artifact_path = \"model\"\n local_path = tmp.path(\"model\")\n mlflow_model = Model(artifact_path=artifact_path, run_id=_AUTOLOG_RUN_ID)\n save_model_kwargs = dict(\n tf_saved_model_dir=serialized.decode(\"utf-8\"),\n tf_meta_graph_tags=[tag_constants.SERVING],\n tf_signature_def_key=\"predict\",\n )\n save_model(path=local_path, mlflow_model=mlflow_model, **save_model_kwargs)\n client = MlflowClient()\n client.log_artifacts(_AUTOLOG_RUN_ID, local_path, artifact_path)\n\n try:\n client._record_logged_model(_AUTOLOG_RUN_ID, mlflow_model)\n except MlflowException:\n # We need to swallow all mlflow exceptions to maintain backwards\n # compatibility with older tracking servers. 
Only print out a warning\n # for now.\n _logger.warning(\n _LOG_MODEL_METADATA_WARNING_TEMPLATE,\n get_artifact_uri(_AUTOLOG_RUN_ID),\n )", "def store(**kwargs):\r\n stored = AppLog(**kwargs)\r\n DBSession.add(stored)", "def do_workload(self):\n pass", "def _sync_log_event(self):\n # sync only after first run and if not currently running\n if self.auto_sync and not self._running and self._has_run:\n self.sync_exp(upload_resources=False)", "def init_log():\n log_op = LogOp()\n log_op.config_log(logging.DEBUG)", "def logManager(self):\n time.sleep(0.1)\n while True:\n try:\n time.sleep(0.2)\n data = self.logQ.get(block=False)\n except Queue.Empty:\n pass\n else:\n try:\n self.log_lock.acquire() \n self.log_file.logEntry(data)\n time.sleep(0.1)\n self.log_lock.release()\n except:\n print '*Unable to write to log file*'", "def on_up(self):\r\n self.log()", "def start_monitoring(self):\n pass", "def store(status, message, **kwargs):\r\n LogRecord(status, message, **kwargs)", "def __init__(self, memory=4000):\n self.session = boto3.Session()\n self.batch_client = self.session.client(\"batch\")\n self.logs_client = self.session.client(\"logs\")\n self.memory = memory", "def test_compute_persistence(perfectModelEnsemble_initialized_control):\n perfectModelEnsemble_initialized_control._compute_persistence(metric=\"acc\")", "def _log_process(ip_or_url,\n username,\n password,\n database,\n series_name,\n port=8086,\n polling_interval=1,\n retention_duration=MEASUREMENTS_RETENTION_DURATION,\n **tags):\n logger = _logger()\n logger.info('Logging GPU to Database {}'.format(ip_or_url))\n\n kwargs = {'series_name': series_name,\n 'polling_interval': polling_interval,\n 'retention_duration': retention_duration}\n kwargs.update(tags)\n p = Process(target=_start_logger_process,\n args=(ip_or_url,\n port,\n username,\n password,\n database),\n kwargs=kwargs)\n p.start()\n yield p\n p.terminate()\n p.join()", "def GetLogs(self):\n raise NotImplementedError()", "def log_state(self):\n rospy.loginfo(\"STATE: %s [%s]\" %(self.__class__.__name__, 15 - self.ros_node.get_time()))", "def on_left(self):\r\n self.log()", "def show_instance(name, call=None):\n if call != \"action\":\n raise SaltCloudSystemExit(\n \"The show_instance action must be called with -a or --action.\"\n )\n\n nodes = list_nodes_full()\n __utils__[\"cloud.cache_node\"](nodes[name], _get_active_provider_name(), __opts__)\n return nodes[name]", "def service( self ):\n\n self.alive = time.time()", "def list(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n print(vmrun.listSnapshots())", "def log(self):\n\n\t\t# Only every 1/10 second (or so) to avoid flooding networktables\n\t\tif not self.log_timer.running or not self.log_timer.hasPeriodPassed(self.log_timer_delay):\n\t\t\treturn\n\n\t\twpilib.SmartDashboard.putString('Pressure', '{0:.2f}'.format(self.get_pressure()))\n\t\twpilib.SmartDashboard.putBoolean(\"Garbo?\", self.is_pbot)\n\n\t\tself.drive.log()\n\t\tself.elevator.log()\n\t\tself.intake.log()", "def test_update_instances_schedule_state(self):\n pass", "def _sync(self):\n if self._conf.start_optime:\n # TODO optimize\n log.info(\"locating oplog, it will take a while\")\n oplog_start = self._conf.start_optime\n doc = self._src.client()['local']['oplog.rs'].find_one({'ts': {'$gte': oplog_start}})\n if not doc:\n log.error('no oplogs newer than the specified oplog')\n return\n oplog_start = doc['ts']\n 
log.info('start timestamp is %s actually' % oplog_start)\n self._last_optime = oplog_start\n self._sync_oplog(oplog_start)\n else:\n oplog_start = get_optime(self._src.client())\n if not oplog_start:\n log.error('get oplog_start failed, terminate')\n sys.exit(1)\n self._last_optime = oplog_start\n self._sync_databases()\n if self._optime_logger:\n self._optime_logger.write(oplog_start)\n log.info('first %s' % oplog_start)\n self._sync_oplog(oplog_start)", "def event_log(self):\n pass", "def Log(self, msg):\n self.DBExecute(\"INSERT INTO Log (class, instance, event) VALUES (%s, %s, %s)\",\n self.__class__.__name__, self._instance, msg)\n print '%s/%s: %s' % (self.__class__.__name__, self._instance, msg)", "def log(self):\r\n return self._log", "def test_retrieve_instances_schedule_state(self):\n pass", "def run(self):\n while True :\n try :\n instance_id = self.queue.get()\n db.hset(application_name,instance_id,1)\n except:\n pass\n finally:\n pass", "def logs(self, **options):\n cmd = self.get_scalingo_prefix_cmd()\n cmd += \" logs -f\"\n for name, val in options.items():\n if len(name) == 1:\n cmd += f\" -{name}\"\n else:\n cmd += f\" --{name}\"\n if val:\n cmd += f\" {val}\"\n if self.env_name == \"local\":\n raise ValueError(\"No logs on local environment\")\n else:\n asyncio.run(self.async_run(cmd))", "def get_full_log(self):\n return self._get_log('full')", "def train_instance(self, epoch, dataset=None, check_point_interval=None, is_restore=False, with_tensorboard=True):\n\n if with_tensorboard:\n self.open_tensorboard()\n\n with tf.Session() as sess:\n saver = tf.train.Saver()\n save_path = os.path.join(self.instance.instance_path, 'check_point')\n check_point_path = os.path.join(save_path, 'instance.ckpt')\n if not os.path.exists(save_path):\n os.mkdir(save_path)\n self.log('make save dir')\n\n self.log('init global variables')\n sess.run(tf.global_variables_initializer())\n\n self.log('init summary_writer')\n summary_writer = tf.summary.FileWriter(self.instance.instance_summary_folder_path, sess.graph)\n\n if is_restore:\n self.log('restore check point')\n saver.restore(sess, check_point_path)\n\n batch_size = self.instance.batch_size\n iter_per_epoch = int(dataset.data_size / batch_size)\n self.log('total Epoch: %d, total iter: %d, iter per epoch: %d'\n % (epoch, epoch * iter_per_epoch, iter_per_epoch))\n\n iter_num, loss_val_D, loss_val_G = 0, 0, 0\n for epoch_ in range(epoch):\n for _ in range(iter_per_epoch):\n iter_num += 1\n self.instance.train_model(sess=sess, iter_num=iter_num, dataset=dataset)\n self.__visualizer_task(sess, iter_num, dataset)\n\n self.instance.write_summary(sess=sess, iter_num=iter_num, dataset=dataset,\n summary_writer=summary_writer)\n\n if iter_num % check_point_interval == 0:\n saver.save(sess, check_point_path)\n self.log(\"epoch %s end\" % (epoch_ + 1))\n self.log('train end')\n\n tf.reset_default_graph()\n self.log('reset default graph')\n\n if with_tensorboard:\n self.close_tensorboard()", "def __periodic_maintenance__(self):\n pass", "def snapshot(self, context, instance, image_id, update_task_state):\n raise NotImplementedError()", "def monitor_instance(self, instance_id):\r\n return self.monitor_instances([instance_id])", "def test_live_migration_src_check_volume_node_not_alive(self):\n\n instance_id = self._create_instance()\n i_ref = db.instance_get(self.context, instance_id)\n dic = {'instance_id': instance_id, 'size': 1}\n v_ref = db.volume_create(self.context, {'instance_id': instance_id,\n 'size': 1})\n t1 = utils.utcnow() - 
datetime.timedelta(1)\n dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume',\n 'topic': 'volume', 'report_count': 0}\n s_ref = db.service_create(self.context, dic)\n\n self.assertRaises(exception.VolumeServiceUnavailable,\n self.scheduler.driver.schedule_live_migration,\n self.context, instance_id, i_ref['host'])\n\n db.instance_destroy(self.context, instance_id)\n db.service_destroy(self.context, s_ref['id'])\n db.volume_destroy(self.context, v_ref['id'])", "def snapshot(self):\n pass", "def start_instance(InstanceId=None):\n pass", "def on_L2(self):\r\n self.log()", "def __enter__(self):\n try:\n run(['logger', 'BVT', 'starting', self.full_description()], \n host=self.dut, timeout=10)\n except SubprocessError:\n print 'INFO: unable to mark test log'\n if not self.record:\n return self\n if self.result_id is None:\n self.mdb = get_autotest()\n terms = {'test_case':self.description or 'to be determined',\n 'automation_user': getpwuid(getuid()).pw_gecos.split(',')[0],\n 'control_pid' : getpid(), 'start_time' : time(),\n 'development_mode' : 0,\n 'command_line':abbreviate(' '.join(sys.argv))}\n if self.dut:\n dutdoc = self.mdb.duts.find_one({'name':self.dut})\n self.dut_id = terms['dut'] = dutdoc['_id']\n terms['dut_name'] = dutdoc['name']\n if 'development_mode' in dutdoc:\n terms['development_mode'] = dutdoc['development_mode']\n self.result_id = self.mdb.results.save(terms)\n if self.job_id is not None:\n self.mdb.jobs.update({'_id':objectid.ObjectId(self.job_id)}, {'$set':{'results_id':self.result_id}})\n if self.build is None and self.dut:\n self.build = get_build(self.dut, timeout=10)\n self.mdb.results.update({'_id':self.result_id}, \n {'$set':{'build':self.build}})\n if self.dut:\n self.mdb.duts.update({'_id':terms['dut']}, {'$set': {\n 'build':self.build,\n 'control_command_line': abbreviate(' '.join(sys.argv)),\n 'result_id' : self.result_id}})\n if self.stdout_filter:\n self.record_queue = Queue()\n self.stream_process = Process(\n target=service_queue, \n args=[self.record_queue, self.result_id, \n self.dut, self.dut_id])\n self.stream_process.start()\n self.stdout_filter.add_callback(self, \n lambda *x: self.record_queue.put(x))\n\n if self.description:\n print 'HEADLINE: starting', self.full_description()\n get_track().updates.save({'result_id':self.result_id,\n 'action':'new result record'})\n return self", "def log_create(sender, instance, created, **kwargs):\n if created:\n changes = model_instance_diff(None, instance)\n\n log_entry = LogEntry.objects.log_create(\n instance,\n action=LogEntry.Action.CREATE,\n changes=json.dumps(changes),\n )\n log_created.send(\n sender=LogEntry,\n old_instance=None,\n new_instance=instance,\n log_instance=log_entry,\n )", "def active():\n if env.get('active_instance'):\n print \"Active Instance: \" + env.get('active_instance')\n else:\n print \"No active instance\"", "def audit(self):\n self.ping()", "def checkLogs():\n run(\"cat /etc/chariot/logs\")#not sure if this is cat-able", "def doRestore(self):\n self.logger.log(\"Begin to restore instance status...\")\n \n try:\n self.readConfigInfo()\n self.getUserInfo()\n \n # dump status to file\n cmd = ClusterCommand.getQueryStatusCmd(self.user, self.dbNodeInfo.id, self.__curStatusFile)\n (status, output) = commands.getstatusoutput(cmd)\n if (status != 0):\n self.logger.logExit(\"Query local instance status failed!Error: %s\" % output)\n \n bakDbStatus = DbClusterStatus()\n bakDbStatus.initFromFile(self.__bakStatusFile)\n bakNodeStatus = 
bakDbStatus.getDbNodeStatusById(self.dbNodeInfo.id)\n if (bakNodeStatus is None):\n self.logger.logExit(\"Get backup status of local node failed!\")\n \n curDbStatus = DbClusterStatus()\n curDbStatus.initFromFile(self.__curStatusFile)\n curNodeStatus = curDbStatus.getDbNodeStatusById(self.dbNodeInfo.id)\n if (curNodeStatus is None):\n self.logger.logExit(\"Get current status of local node failed!\")\n if (not curNodeStatus.isNodeHealthy()):\n self.logger.logExit(\"Current status of node is not healthy!\")\n \n # Compare the status and restore it\n bakInstances = bakNodeStatus.datanodes + bakNodeStatus.gtms\n for bakInst in bakInstances:\n curInst = curNodeStatus.getInstanceByDir(bakInst.datadir)\n if (curInst is None):\n self.logger.logExit(\"Get current status of instance failed!DataDir:%s\" % bakInst.datadir)\n \n if (bakInst.status == curInst.status):\n continue\n \n if (bakInst.status == DbClusterStatus.INSTANCE_STATUS_PRIMARY):\n self.__switchToPrimary(bakInst.datadir)\n elif (bakInst.status == DbClusterStatus.INSTANCE_STATUS_STANDBY):\n self.__switchToStandby(bakInst.datadir)\n \n except Exception, e:\n self.logger.logExit(str(e))\n \n self.logger.log(\"Restore instance status successfully.\")\n self.logger.closeLog()", "def test_live_migration_src_check_instance_not_running(self):\n\n instance_id = self._create_instance(power_state=power_state.NOSTATE)\n i_ref = db.instance_get(self.context, instance_id)\n\n try:\n self.scheduler.driver._live_migration_src_check(self.context,\n i_ref)\n except exception.Invalid, e:\n c = (e.message.find('is not running') > 0)\n\n self.assertTrue(c)\n db.instance_destroy(self.context, instance_id)", "def __init__(self, batch_size, log_steps):\n self.batch_size = batch_size\n super(TimeHistory, self).__init__()\n self.log_steps = log_steps\n\n # Logs start of step 0 then end of each step based on log_steps interval.\n self.timestamp_log = []", "def check_status():\n js = _get_jetstream_conn()\n i = js.compute.instances.get(session.attributes.get('instance_id'))\n if not i:\n return question(\"There was a problem. 
Please retry your command.\")\n\n status = i.state\n if session.attributes['status'] != status:\n msg = \"New instance status is {0}.\".format(status)\n if not session.attributes['public_ip'] and status == 'running':\n # Attach a floating IP to the instance\n fip = None\n fips = js.network.floating_ips()\n for ip in fips:\n if not ip.in_use():\n fip = ip\n if fip:\n i.add_floating_ip(fip.public_ip)\n session.attributes['public_ip'] = fip.public_ip\n else:\n msg = \"Instance status is {0}\".format(status)\n\n session.attributes['status'] = status\n\n if session.attributes['status'] != 'running':\n q = \"Would you like to check the status again?\"\n return question(msg + q).reprompt(q)\n else:\n card_content = 'Access your instance at http://{0}'.format(\n session.attributes.get('public_ip'))\n return statement(msg).simple_card(\n title=\"Instance {0} was launched.\".format(i.name),\n content=msg + card_content)", "def objectLogger(params):\n mode = params['mode']\n maxlen = params['maxlen']\n file_format = params['format']\n home_dir = params['storage_dir']\n server = Listener((params['ip'], int(params['port'])))\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n while True:\n conn = server.accept()\n while True:\n try:\n data = conn.recv()\n except EOFError:\n break\n if data:\n storage.append(data)\n else:\n storage.sync(id_generator())\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n conn.close()", "def execute(self):\n instances = self._get_active_instances()\n if not instances:\n print(\"No running instances.\")\n else:\n output = \"\\nInstanceId\\t\\tName\\n\\n\"\n for instance in instances:\n name = self._get_instance_name(instance)\n instance_id = instance['InstanceId']\n output += f\"{instance_id}\\t{name}\\n\"\n\n print(output)", "def _sync_instance_power_state(self, context, db_instance, vm_power_state,\n use_slave=False):\n\n # We re-query the DB to get the latest instance info to minimize\n # (not eliminate) race condition.\n db_instance.refresh(use_slave=use_slave)\n db_power_state = db_instance.power_state\n vm_state = db_instance.vm_state\n\n if self.host != db_instance.host:\n # on the sending end of cloud-cloud _sync_power_state\n # may have yielded to the greenthread performing a live\n # migration; this in turn has changed the resident-host\n # for the VM; However, the instance is still active, it\n # is just in the process of migrating to another host.\n # This implies that the cloud source must relinquish\n # control to the cloud destination.\n LOG.info(_LI(\"During the sync_power process the \"\n \"instance has moved from \"\n \"host %(src)s to host %(dst)s\"),\n {'src': db_instance.host,\n 'dst': self.host},\n instance=db_instance)\n return\n elif db_instance.task_state is not None:\n # on the receiving end of cloud-cloud, it could happen\n # that the DB instance already report the new resident\n # but the actual VM has not showed up on the hypervisor\n # yet. In this case, let's allow the loop to continue\n # and run the state sync in a later round\n LOG.info(_LI(\"During sync_power_state the instance has a \"\n \"pending task (%(task)s). 
Skip.\"),\n {'task': db_instance.task_state},\n instance=db_instance)\n return\n\n orig_db_power_state = db_power_state\n if vm_power_state != db_power_state:\n LOG.info(_LI('During _sync_instance_power_state the DB '\n 'power_state (%(db_power_state)s) does not match '\n 'the vm_power_state from the hypervisor '\n '(%(vm_power_state)s). Updating power_state in the '\n 'DB to match the hypervisor.'),\n {'db_power_state': db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n # power_state is always updated from hypervisor to db\n db_instance.power_state = vm_power_state\n db_instance.save()\n db_power_state = vm_power_state\n\n # Note(maoy): Now resolve the discrepancy between vm_state and\n # vm_power_state. We go through all possible vm_states.\n if vm_state in (vm_states.BUILDING,\n vm_states.RESCUED,\n vm_states.RESIZED,\n vm_states.SUSPENDED,\n vm_states.ERROR):\n # TODO(maoy): we ignore these vm_state for now.\n pass\n elif vm_state == vm_states.ACTIVE:\n # The only rational power state should be RUNNING\n if vm_power_state in (power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Instance shutdown by itself. Calling the \"\n \"stop API. Current vm_state: %(vm_state)s, \"\n \"current task_state: %(task_state)s, \"\n \"original DB power_state: %(db_power_state)s, \"\n \"current VM power_state: %(vm_power_state)s\"),\n {'vm_state': vm_state,\n 'task_state': db_instance.task_state,\n 'db_power_state': orig_db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n try:\n # Note(maoy): here we call the API instead of\n # brutally updating the vm_state in the database\n # to allow all the hooks and checks to be performed.\n if db_instance.shutdown_terminate:\n self.compute_api.delete(context, db_instance)\n else:\n self.compute_api.stop(context, db_instance)\n except Exception:\n # Note(maoy): there is no need to propagate the error\n # because the same power_state will be retrieved next\n # time and retried.\n # For example, there might be another task scheduled.\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_power_state == power_state.SUSPENDED:\n LOG.warning(_LW(\"Instance is suspended unexpectedly. Calling \"\n \"the stop API.\"), instance=db_instance)\n try:\n self.compute_api.stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_power_state == power_state.PAUSED:\n # Note(maoy): a VM may get into the paused state not only\n # because the user request via API calls, but also\n # due to (temporary) external instrumentations.\n # Before the virt layer can reliably report the reason,\n # we simply ignore the state discrepancy. In many cases,\n # the VM state will go back to running after the external\n # instrumentation is done. See bug 1097806 for details.\n LOG.warning(_LW(\"Instance is paused unexpectedly. Ignore.\"),\n instance=db_instance)\n elif vm_power_state == power_state.NOSTATE:\n # Occasionally, depending on the status of the hypervisor,\n # which could be restarting for example, an instance may\n # not be found. Therefore just log the condition.\n LOG.warning(_LW(\"Instance is unexpectedly not found. Ignore.\"),\n instance=db_instance)\n elif vm_state == vm_states.STOPPED:\n if vm_power_state not in (power_state.NOSTATE,\n power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Instance is not stopped. Calling \"\n \"the stop API. 
Current vm_state: %(vm_state)s,\"\n \" current task_state: %(task_state)s, \"\n \"original DB power_state: %(db_power_state)s, \"\n \"current VM power_state: %(vm_power_state)s\"),\n {'vm_state': vm_state,\n 'task_state': db_instance.task_state,\n 'db_power_state': orig_db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n try:\n # NOTE(russellb) Force the stop, because normally the\n # cloud API would not allow an attempt to stop a stopped\n # instance.\n self.compute_api.force_stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_state == vm_states.PAUSED:\n if vm_power_state in (power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Paused instance shutdown by itself. Calling \"\n \"the stop API.\"), instance=db_instance)\n try:\n self.compute_api.force_stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_state in (vm_states.SOFT_DELETED,\n vm_states.DELETED):\n if vm_power_state not in (power_state.NOSTATE,\n power_state.SHUTDOWN):\n # Note(maoy): this should be taken care of periodically in\n # _cleanup_running_deleted_instances().\n LOG.warning(_LW(\"Instance is not (soft-)deleted.\"),\n instance=db_instance)", "def load(lb_id, backend='memory'):\n return b_api.fetch(backend).logbook_get(lb_id)", "def __init__(self, logfile):\n\n self.logfile = logfile\n if self. logfile:\n self.file = open(logfile, \"w\")\n self.starttime = time.time()\n self.file.write(\"%.2f %s Starting log\\n\" % (time.time() - self.starttime, time.asctime()))", "def daemonize(self):\n raise NotImplementedError()", "def monitor(instance=\"default\"):\n global logger_ic\n while True:\n try:\n with open(\"{}/{}/.{}-bmc.pid\".format(\n config.infrasim_home, instance, instance), \"r\") as f:\n pid = f.readline().strip()\n if not os.path.exists(\"/proc/{}\".format(pid)):\n logger_ic.warning(\"Node {} vBMC {} is not running, \"\n \"ipmi-console is ready to quit\".\n format(instance, pid))\n break\n time.sleep(3)\n except IOError:\n logger_ic.warning(\"Node {} workspace is possibly destroyed, \"\n \"ipmi-console is ready to quit\".format(instance))\n break\n stop(instance)", "def save(self):\n if self._dirty:\n # key and timestamp\n data = {\n InstanceStates.INSTANCE_TABLE_NAME: self._service,\n InstanceStates.INSTANCE_TABLE_ACCOUNT_REGION: self._current_account_region,\n InstanceStates.INSTANCE_TABLE_TIMESTAMP: Decimal(time.time())\n }\n\n # store instance states as one column per instance\n for i in self._state_info:\n data[i] = self._state_info[i]\n\n # instances to purge\n if len(self._instances_to_purge) > 0:\n data[InstanceStates.INSTANCE_TABLE_PURGE] = self._instances_to_purge\n\n self.state_table.put_item_with_retries(Item=data)\n self._dirty = False", "def start_instance(self):\n instance_id = self._choose_among_stopped_instances()\n\n # Cancel\n if not instance_id:\n print 'Operation cancelled'\n return\n\n print '# Starting the instance \"%s\"' % instance_id\n if self.compute.start_instance(instance_id):\n print 'The instance has been started'\n else:\n print 'The instance could not be started'", "def _inst_run(self):\r\n self._inst_get_img_info_from_db()\r\n print(\"run method: \", time.ctime())\r\n th = threading.Timer(\r\n 10,\r\n self._inst_run\r\n )\r\n th.start()", "def __init__(self, instance):\n self.instance = instance\n self.runs = []", "def 
__on_backup_created(self, logger, *args):", "def create_volume(self, instance_id):\n user, instance = _get_user_and_instance(self.girder_client, instance_id)\n tale = self.girder_client.get('/tale/{taleId}'.format(**instance))\n\n self.job_manager.updateProgress(\n message='Creating volume', total=CREATE_VOLUME_STEP_TOTAL,\n current=1, forceFlush=True)\n\n vol_name = \"%s_%s_%s\" % (tale['_id'], user['login'], new_user(6))\n fs_sidecar = FSContainer.start_container(vol_name)\n payload = {\n \"mounts\": [\n {\n \"type\": \"data\",\n \"protocol\": \"girderfs\",\n \"location\": \"data\",\n },\n {\n \"type\": \"home\",\n \"protocol\": \"bind\",\n \"location\": \"home\",\n },\n {\n \"type\": \"workspace\",\n \"protocol\": \"bind\",\n \"location\": \"workspace\",\n },\n {\n \"type\": \"versions\",\n \"protocol\": \"girderfs\",\n \"location\": \"versions\",\n },\n {\n \"type\": \"runs\",\n \"protocol\": \"girderfs\",\n \"location\": \"runs\",\n },\n ],\n \"taleId\": tale[\"_id\"],\n \"userId\": user[\"_id\"],\n \"girderApiUrl\": GIRDER_API_URL,\n \"girderApiKey\": _get_api_key(self.girder_client),\n \"root\": vol_name,\n }\n FSContainer.mount(fs_sidecar, payload)\n self.job_manager.updateProgress(\n message='Volume created', total=CREATE_VOLUME_STEP_TOTAL,\n current=CREATE_VOLUME_STEP_TOTAL, forceFlush=True)\n print(\"WT Filesystem created successfully.\")\n\n cli = docker.from_env()\n return dict(\n nodeId=cli.info()['Swarm']['NodeID'],\n fscontainerId=fs_sidecar.id,\n volumeName=vol_name,\n instanceId=instance_id,\n taleId=tale[\"_id\"],\n )", "def test_get_instance_state(self):\r\n self.peer_grading.get_instance_state()", "def on_L1(self):\r\n self.log()", "def __init__(self):\n self._workload = None\n self._engine = Engine()", "def test_delete_instance_changes_power_state(self):\n instance = self._create_fake_instance_obj()\n self.compute._delete_instance(self.context, instance, [])\n self.assertEqual(power_state.NOSTATE, instance.power_state)", "def test_errors_on_log(self):\n mb = self.maria_backup\n self.assertFalse(mb.errors_on_log()) # at the moment, xtrabackup execution does not generate disk logs", "def log_kernel_launch(self, cmd: List[str]) -> None:\n pass", "def access_spot_instance_state() -> Response:\n retry_session = requests_retry_session()\n response = retry_session.get(INSTANCE_ACTION_URL)\n return response", "def main():\n\n parser = init_parser()\n args = parser.parse_args()\n\n # Set up logging.\n level = logging.INFO\n if args.debug:\n level = logging.DEBUG\n logging.basicConfig(format='%(asctime)s %(levelname)s %(filename)s:' \\\n '%(lineno)s %(message)s ', level=level)\n logging.info(\"Logging started\")\n\n message = \"Backing up \"\n if args.source_code:\n message += \"source and \"\n message += \"data for: {0}\".format(args.app_id)\n logging.info(message)\n\n zk_connection_locations = appscale_info.get_zk_locations_string()\n zookeeper = zk.ZKTransaction(host=zk_connection_locations)\n db_info = appscale_info.get_db_info()\n table = db_info[':table']\n\n skip_list = args.skip\n if not skip_list:\n skip_list = []\n logging.info(\"Will skip the following kinds: {0}\".format(sorted(skip_list)))\n ds_backup = DatastoreBackup(args.app_id, zookeeper, table,\n source_code=args.source_code, skip_list=sorted(skip_list))\n try:\n ds_backup.run()\n finally:\n zookeeper.close()", "def __enable_log_streaming():\n # Enable BigQuery service for the project:\n run_command('gcloud services enable bigquery --project {}'.format(PROJECT_ID))\n\n # Create the BigQuery dataset to store 
the logs:\n # For details refer to https://cloud.google.com/bigquery/docs/datasets#bigquery-create-dataset-cli\n run_command('bq mk --data_location {} --description \\\"Cloud logging export.\\\" {}'\n .format(LOGS_LOCATION, LOGS_SINK_DATASET_ID), 'already exists')\n\n # Set up a log sink to the above-created BigQuery dataset:\n output_message = run_command('gcloud logging sinks create {} {} --project {} \\\n --log-filter=\\'resource.type=\\\"gcs_bucket\\\" OR resource.type=\\\"project\\\"\\''\n .format(LOGS_SINK_NAME, LOGS_SINK_DESTINATION, PROJECT_ID))\n\n # The service account that will be writing to BQ dataset is listed by the previous command.\n # After extracting the service account from the message, you need to give it BQ Writer role to that service account.\n __set_dataset_access( __find_service_account_in_message(output_message) )", "def check(key):\n instance = key.get()\n if not instance:\n logging.warning('Instance does not exist: %s', key)\n return\n\n if not instance.active_metadata_update:\n logging.warning('Instance active metadata operation unspecified: %s', key)\n return\n\n if not instance.active_metadata_update.url:\n logging.warning(\n 'Instance active metadata operation URL unspecified: %s', key)\n return\n\n now = utils.utcnow()\n result = net.json_request(\n instance.active_metadata_update.url, scopes=gce.AUTH_SCOPES)\n if result['status'] != 'DONE':\n return\n\n if result.get('error'):\n logging.warning(\n 'Instance metadata operation failed: %s\\n%s',\n key,\n json.dumps(result, indent=2),\n )\n metrics.instance_set_metadata_time.add(\n (now - instance.active_metadata_update.operation_ts).total_seconds(),\n fields={\n 'success': False,\n 'zone': instance.instance_group_manager.id(),\n },\n )\n metrics.send_machine_event('METADATA_UPDATE_FAILED', instance.hostname)\n reschedule_active_metadata_update(key, instance.active_metadata_update.url)\n metrics.send_machine_event('METADATA_UPDATE_READY', instance.hostname)\n else:\n metrics.instance_set_metadata_time.add(\n (now - instance.active_metadata_update.operation_ts).total_seconds(),\n fields={\n 'success': True,\n 'zone': instance.instance_group_manager.id(),\n },\n )\n metrics.send_machine_event('METADATA_UPDATE_SUCCEEDED', instance.hostname)\n clear_active_metadata_update(key, instance.active_metadata_update.url)", "def getLogs():", "def getLogs():", "def pre_run_hook(self, instance, private_data_dir):\n instance.log_lifecycle(\"pre_run\")\n\n # Before task is started, ensure that job_event partitions exist\n create_partition(instance.event_class._meta.db_table, start=instance.created)", "def test_slf_basic():\n oldlogfile = get_logfile()\n with tempfile.TemporaryDirectory() as tmp:\n logfile = os.path.join(tmp, 'log.txt')\n assert ~os.path.exists(logfile)\n start_logfile(logfile)\n assert os.path.exists(logfile)\n set_logfile(oldlogfile)", "def start():\n logging.info(\"Execution Started\")", "def test_metering_database(self):\n self.with_deletion = self.input.param(\"delete\", False)\n self.db_name = \"%s-testmetering\" % self.db_name\n # validate initial throughput is 5000/3 = 1666\n for bucket in self.cluster.buckets:\n print(bucket.servers)\n self.assertEqual(self.bucket_util.get_throttle_limit(bucket),\n self.bucket_throttling_limit)\n\n # validate create, update, delete stat\n for op_type in [\"create\", \"update\"]:\n if op_type == \"create\":\n self.load_data(create_start=0, create_end=self.num_items, create_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, 
self.cluster.buckets)\n if op_type == \"update\":\n self.load_data(update_start=0, update_end=self.num_items, update_perc=100, mutated=1)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if self.with_deletion:\n self.log.info(\"performing delete operation\")\n self.load_data(delete_start=0, delete_end=self.num_items, delete_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)", "def test_instance_running(self) -> None:\n if self.prod_env:\n ec2_name = 'saints-xctf-server-prod-asg'\n else:\n ec2_name = 'saints-xctf-server-dev-asg'\n\n instances = self.get_ec2(ec2_name)\n self.assertTrue(len(instances) > 0)", "def log(self, extra=None):\n\t\tself.stop()\n\t\treturn self._log(self.time(), extra)", "def snapshot(self):\n return self.journal.create_checkpoint()", "def test_restart_statestore(self):\n # Verify two catalogd instances are created with one as active.\n catalogds = self.cluster.catalogds()\n assert(len(catalogds) == 2)\n catalogd_service_1 = catalogds[0].service\n catalogd_service_2 = catalogds[1].service\n assert(catalogd_service_1.get_metric_value(\"catalog-server.active-status\"))\n assert(not catalogd_service_2.get_metric_value(\"catalog-server.active-status\"))\n\n # Verify ports of the active catalogd of statestore and impalad are matching with\n # the catalog service port of the current active catalogd.\n self.__verify_statestore_active_catalogd_port(catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(0, catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(1, catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(2, catalogd_service_1)\n\n # Restart statestore. Verify one catalogd is assigned as active, the other is\n # assigned as standby.\n self.cluster.statestored.restart()\n wait_time_s = build_flavor_timeout(90, slow_build_timeout=180)\n self.cluster.statestored.service.wait_for_metric_value('statestore.live-backends',\n expected_value=5, timeout=wait_time_s)\n sleep_time_s = build_flavor_timeout(2, slow_build_timeout=5)\n sleep(sleep_time_s)\n assert(catalogd_service_1.get_metric_value(\"catalog-server.active-status\"))\n assert(not catalogd_service_2.get_metric_value(\"catalog-server.active-status\"))\n\n # Verify ports of the active catalogd of statestore and impalad are matching with\n # the catalog service port of the current active catalogd.\n self.__verify_statestore_active_catalogd_port(catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(0, catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(1, catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(2, catalogd_service_1)\n # Verify simple queries are ran successfully.\n self.__run_simple_queries()\n\n unexpected_msg = re.compile(\"Ignore the update of active catalogd since more recent \"\n \"update has been processed ([0-9]+ vs [0-9]+)\")\n self.assert_catalogd_log_contains(\"INFO\", unexpected_msg, expected_count=0)\n self.assert_impalad_log_contains(\"INFO\", unexpected_msg, expected_count=0)", "def clusterMonitor():\n node = os.environ['DIM_DNS_NODE']\n xml = XMLTaskList.TransformXmlToObjects()\n xml.load('../xml/TaskInventory.xml') # loads the Task Inventory\n xml.load('../xml/HLTD01.xml') # loads the Node List\n xml.load('../xml/HLTD02.xml') # loads the Node List\n xml.load('../xml/HLTD03.xml') # loads the Node List\n xml.load('../xml/HLTD04.xml') # loads the Node List\n xml.load('../xml/HLTD06.xml') # loads the Node List\n 
xml.load('../xml/HLTD07.xml') # loads the Node List\n xml.load('../xml/HLTD08.xml') # loads the Node List\n xml.load('../xml/HLTD09.xml') # loads the Node List\n xml.load('../xml/HLTD10.xml') # loads the Node List\n xml.load('../xml/HLTD11.xml') # loads the Node List\n xml.load('../xml/HLTE04.xml') # loads the Node List\n xml.load('../xml/HLTE06.xml') # loads the Node List\n xml.load('../xml/'+node.upper()+'.xml') # loads the Node List\n collector = ClusterCollector(xml)\n collector.startx()\n collector.run()", "def export_getRunningInstancesHistory( self, timespan, bucketSize ):\n return gVirtualMachineDB.getRunningInstancesHistory( timespan, bucketSize )", "def time_step(self):\n\n self.reinitialize_backup_containers()\n\n super().time_step()\n\n self.make_a_backup_for_t()", "def _start(self, instance):\n try:\n # Attempt to start the VE.\n # NOTE: The VE will throw a warning that the hostname is invalid\n # if it isn't valid. This is logged in LOG.error and is not\n # an indication of failure.\n _, err = utils.execute('sudo', 'vzctl', 'start', instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Failed to start %d' % instance['id'])\n\n # Set instance state as RUNNING\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.RUNNING)\n return True" ]
[ "0.58156765", "0.55455977", "0.5419773", "0.5353307", "0.5311246", "0.52902937", "0.52496463", "0.5209651", "0.51289505", "0.50947905", "0.50946975", "0.50858665", "0.50722444", "0.50575197", "0.5044451", "0.504037", "0.5036916", "0.50341827", "0.50292313", "0.5020206", "0.50123", "0.50047606", "0.50003886", "0.49877083", "0.49759492", "0.49706113", "0.49431816", "0.49305305", "0.4928458", "0.49267578", "0.4924654", "0.49205193", "0.49178112", "0.48929366", "0.48657364", "0.48570013", "0.48568472", "0.48557425", "0.48555914", "0.48550963", "0.48510417", "0.48502585", "0.48456535", "0.484387", "0.48432833", "0.48412123", "0.48391682", "0.48361674", "0.48290518", "0.48243877", "0.48170596", "0.48079622", "0.47919264", "0.47827956", "0.4777095", "0.4776649", "0.47736618", "0.47730917", "0.47705632", "0.47653532", "0.4756454", "0.4734139", "0.4733906", "0.47226155", "0.4719467", "0.47176373", "0.47172096", "0.47166345", "0.47164068", "0.47163415", "0.47153816", "0.4714649", "0.4710761", "0.47106117", "0.47034428", "0.4698475", "0.4693774", "0.4690239", "0.46810684", "0.46735573", "0.46723154", "0.4671075", "0.46692687", "0.46627858", "0.46587133", "0.46579242", "0.4657704", "0.46552715", "0.46552715", "0.4653389", "0.46524897", "0.46486965", "0.46463934", "0.46462476", "0.46450648", "0.46427557", "0.46424696", "0.4640919", "0.46350932", "0.46321175", "0.46319675" ]
0.0
-1
The instance must be in the running state when you call this operation. This operation is applicable only to general-purpose local-disk and dedicated local-disk instances. You can call this operation up to 30 times per minute. To call this operation at a higher frequency, use a Logstore. For more information, see [Manage a Logstore](~~48990~~).
async def modify_audit_log_filter_with_options_async( self, request: dds_20151201_models.ModifyAuditLogFilterRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.ModifyAuditLogFilterResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.filter): query['Filter'] = request.filter if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.role_type): query['RoleType'] = request.role_type if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='ModifyAuditLogFilter', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.ModifyAuditLogFilterResponse(), await self.call_api_async(params, req, runtime) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def monitorStore():\n # commented to use psutil system info system_info = systeminfo.get_all_info()\n\n system_info = nodeinfo.node_all()\n system_info ['monitored_timestamp'] = config.get_current_system_timestamp()\n\n # Attach sliver info to system info\n system_info.update(sliverinfo.collectAllDataAPI())\n\n s = shelve.open('log_shelf.db', writeback = True)\n\n while(1):\n try:\n try:\n if s.has_key('current_seq_number'):\n #Update current sequence number\n s['current_seq_number']+= 1\n current_seq = s['current_seq_number']\n else:\n current_seq = 1\n s['current_seq_number']= current_seq\n\n print(\"writing to file: \" + str(current_seq))\n\n # print(\"writing to file\" + str(system_info))\n s[str(current_seq)]= system_info\n\n\n finally:\n s.close()\n break\n\n except OSError:\n # In some research devices, the underlying dbm has a bug which needs to be handled explicitly\n print(\"Exception caught while handling shelve file!! OS Error: file not found. Trying again in 1 second\")\n time.sleep(1)\n continue\n\n delete_seen_entries()", "def log(msg):\n\n print('datastore: %s' % msg)", "def detail_running_instance(self):\n\n instance_id = self._choose_among_running_instances()\n\n # Exit option\n if not instance_id:\n print 'Operation cancelled'\n return\n\n # Print the details\n print '# Details of the \"%s\" instance' % instance_id\n self.compute.detail_running_instance(instance_id)", "def on_start(self):\r\n self.log()", "def log_runtime(label, mean_time, std, instances):\n pass", "def on_sync(self):\r\n self.log()", "def snapshot(self, instance, name):\n # TODO(imsplitbit): Need to implement vzdump\n pass", "def internal_event(self):\n # log activity\n self.log_activity(LogEntry(\n sys_time=time(),\n logical_time=self.logical_clock,\n action=\"work\"\n ))", "def store_stream(data: DataStream):\n if data:\n try:\n c1 = datetime.now()\n CC.save_datastream(data,\"datastream\")\n e1 = datetime.now()\n CC.save_datastream_to_influxdb(data)\n i1 = datetime.now()\n print(\"Cassandra Time: \", e1-c1, \" Influx Time: \",i1-e1, \" Batch size: \",len(data.data))\n except:\n cc_log()", "def monitor(self):", "def doBackup(self):\n self.logger.log(\"Begin to backup instance status...\")\n \n try:\n self.readConfigInfo()\n self.getUserInfo()\n \n # dump status to file\n cmd = ClusterCommand.getQueryStatusCmd(self.user, self.dbNodeInfo.id, self.__bakStatusFile)\n (status, output) = commands.getstatusoutput(cmd)\n if (status != 0):\n self.logger.logExit(\"Query local instance status failed!Error: %s\" % output)\n except Exception, e:\n self.logger.logExit(str(e))\n \n self.logger.log(\"Backup instance status successfully.\")\n self.logger.closeLog()", "def execute(self):\n\n c = self.config\n regions = dict((x.name, x) for x in boto.ec2.regions(\n aws_access_key_id=c['access_key'],\n aws_secret_access_key=c['secret_access_key']))\n connect = regions[c['region']].connect(\n aws_access_key_id=c['access_key'],\n aws_secret_access_key=c['secret_access_key'])\n volume = connect.get_all_volumes([c['volume_id']])[0]\n volume.create_snapshot(c['volume_id'])\n snapshots = {}\n for x in connect.get_all_snapshots():\n if x.volume_id == c['volume_id']:\n snapshots.update({x.id: x.start_time})\n snapshots = sorted(snapshots.items(), key=lambda (k, v): (v, k), reverse=True)\n for i in range(int(c['keep']), len(snapshots)):\n connect.delete_snapshot(snapshots[i][0])", "def logmem(cls, tag):\n\n cls.logger.info(\"----%s----\", str(tag))\n mem = psutil.virtual_memory()\n cls.logger.info(\"total:%s M\", mem.total / 
1024 / 1024)\n cls.logger.info(\"available:%s M\", mem.available / 1024 / 1024)\n cls.logger.info(\"used:%s M\", mem.used / 1024 / 1024)\n cls.logger.info(\"free:%s M\", mem.free / 1024 / 1024)\n cls.logger.info(\"active:%s M\", mem.active / 1024 / 1024)\n cls.logger.info(\"inactive:%s M\", mem.inactive / 1024 / 1024)\n cls.logger.info(\"buffers:%s M\", mem.buffers / 1024 / 1024)\n cls.logger.info(\"cached:%s M\", mem.cached / 1024 / 1024)\n cls.logger.info(\"shared:%s M\", mem.shared / 1024 / 1024)", "def run_instance():\n ami_id = \"ami-04876f29fd3a5e8ba\" # AMI Id\n instance_type = \"t2.micro\" # Instance Type\n tag_specs = [\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': 'Name',\n 'Value': 'BoB10@ProductDev.sdk.8084'\n }\n ]\n }\n ]\n\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Run an instance\n\n instances = ec2_resource.create_instances(ImageId=ami_id, InstanceType=instance_type,\n MaxCount=1, MinCount=1, KeyName='BoB10@ProductDev.8084',\n TagSpecifications=tag_specs, \n SecurityGroupIds=['sg-06b757b4bb272d98f'],\n UserData=assemble_userdata().as_string())\n Instance_id = instances[0].id\n print('\\nInstance Id: ' + Instance_id)\n print('Image Id: ' + instances[0].image_id)\n print('Instance Type: ' + instances[0].instance_type)\n print('State: ' + instances[0].state['Name'])\n return Instance_id", "def main(logger):\n logger.info('Snapshot Reaper starting')\n keep_running = True\n while keep_running:\n logger.info(\"Connecting to vCenter {} as {}\".format(const.INF_VCENTER_SERVER, const.INF_VCENTER_USER))\n with vCenter(host=const.INF_VCENTER_SERVER, user=const.INF_VCENTER_USER,\n password=const.INF_VCENTER_PASSWORD) as vcenter:\n try:\n start_loop = time.time()\n reap_snapshots(vcenter, logger)\n except Exception as doh:\n logger.exception(doh)\n keep_running = False\n else:\n ran_for = int(time.time() - start_loop)\n logger.debug('Took {} seconds to check all snapshots'.format(ran_for))\n loop_delta = LOOP_INTERVAL - ran_for\n sleep_for = max(0, loop_delta)\n time.sleep(sleep_for)", "def run(self):\n while True :\n try:\n instance_id = os.getenv(\"CF_INSTANCE_INDEX\")\n mydict = db.hgetall(application_name)\n if instance_id not in mydict :\n self.queue.put(instance_id)\n except :\n pass\n finally:\n pass", "def running(self):\n pass", "def post_run_hook(self, instance, status):\n instance.log_lifecycle(\"post_run\")", "def _create_vm(self):\n self._create_instance_in_the_db()\n self.type_data = db.instance_type_get_by_name(None, 'm1.large')\n self.conn.spawn(self.context, self.instance, self.network_info)\n self._check_vm_record()", "def log_model_without_starting_new_run():\n with TempDir() as tmp:\n artifact_path = \"model\"\n local_path = tmp.path(\"model\")\n mlflow_model = Model(artifact_path=artifact_path, run_id=_AUTOLOG_RUN_ID)\n save_model_kwargs = dict(\n tf_saved_model_dir=serialized.decode(\"utf-8\"),\n tf_meta_graph_tags=[tag_constants.SERVING],\n tf_signature_def_key=\"predict\",\n )\n save_model(path=local_path, mlflow_model=mlflow_model, **save_model_kwargs)\n client = MlflowClient()\n client.log_artifacts(_AUTOLOG_RUN_ID, local_path, artifact_path)\n\n try:\n client._record_logged_model(_AUTOLOG_RUN_ID, mlflow_model)\n except MlflowException:\n # We need to swallow all mlflow exceptions to maintain backwards\n # compatibility with older tracking servers. 
Only print out a warning\n # for now.\n _logger.warning(\n _LOG_MODEL_METADATA_WARNING_TEMPLATE,\n get_artifact_uri(_AUTOLOG_RUN_ID),\n )", "def store(**kwargs):\r\n stored = AppLog(**kwargs)\r\n DBSession.add(stored)", "def do_workload(self):\n pass", "def _sync_log_event(self):\n # sync only after first run and if not currently running\n if self.auto_sync and not self._running and self._has_run:\n self.sync_exp(upload_resources=False)", "def init_log():\n log_op = LogOp()\n log_op.config_log(logging.DEBUG)", "def logManager(self):\n time.sleep(0.1)\n while True:\n try:\n time.sleep(0.2)\n data = self.logQ.get(block=False)\n except Queue.Empty:\n pass\n else:\n try:\n self.log_lock.acquire() \n self.log_file.logEntry(data)\n time.sleep(0.1)\n self.log_lock.release()\n except:\n print '*Unable to write to log file*'", "def on_up(self):\r\n self.log()", "def start_monitoring(self):\n pass", "def __init__(self, memory=4000):\n self.session = boto3.Session()\n self.batch_client = self.session.client(\"batch\")\n self.logs_client = self.session.client(\"logs\")\n self.memory = memory", "def store(status, message, **kwargs):\r\n LogRecord(status, message, **kwargs)", "def test_compute_persistence(perfectModelEnsemble_initialized_control):\n perfectModelEnsemble_initialized_control._compute_persistence(metric=\"acc\")", "def _log_process(ip_or_url,\n username,\n password,\n database,\n series_name,\n port=8086,\n polling_interval=1,\n retention_duration=MEASUREMENTS_RETENTION_DURATION,\n **tags):\n logger = _logger()\n logger.info('Logging GPU to Database {}'.format(ip_or_url))\n\n kwargs = {'series_name': series_name,\n 'polling_interval': polling_interval,\n 'retention_duration': retention_duration}\n kwargs.update(tags)\n p = Process(target=_start_logger_process,\n args=(ip_or_url,\n port,\n username,\n password,\n database),\n kwargs=kwargs)\n p.start()\n yield p\n p.terminate()\n p.join()", "def GetLogs(self):\n raise NotImplementedError()", "def log_state(self):\n rospy.loginfo(\"STATE: %s [%s]\" %(self.__class__.__name__, 15 - self.ros_node.get_time()))", "def on_left(self):\r\n self.log()", "def show_instance(name, call=None):\n if call != \"action\":\n raise SaltCloudSystemExit(\n \"The show_instance action must be called with -a or --action.\"\n )\n\n nodes = list_nodes_full()\n __utils__[\"cloud.cache_node\"](nodes[name], _get_active_provider_name(), __opts__)\n return nodes[name]", "def service( self ):\n\n self.alive = time.time()", "def test_update_instances_schedule_state(self):\n pass", "def list(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n print(vmrun.listSnapshots())", "def _sync(self):\n if self._conf.start_optime:\n # TODO optimize\n log.info(\"locating oplog, it will take a while\")\n oplog_start = self._conf.start_optime\n doc = self._src.client()['local']['oplog.rs'].find_one({'ts': {'$gte': oplog_start}})\n if not doc:\n log.error('no oplogs newer than the specified oplog')\n return\n oplog_start = doc['ts']\n log.info('start timestamp is %s actually' % oplog_start)\n self._last_optime = oplog_start\n self._sync_oplog(oplog_start)\n else:\n oplog_start = get_optime(self._src.client())\n if not oplog_start:\n log.error('get oplog_start failed, terminate')\n sys.exit(1)\n self._last_optime = oplog_start\n self._sync_databases()\n if self._optime_logger:\n self._optime_logger.write(oplog_start)\n log.info('first %s' % oplog_start)\n 
self._sync_oplog(oplog_start)", "def log(self):\n\n\t\t# Only every 1/10 second (or so) to avoid flooding networktables\n\t\tif not self.log_timer.running or not self.log_timer.hasPeriodPassed(self.log_timer_delay):\n\t\t\treturn\n\n\t\twpilib.SmartDashboard.putString('Pressure', '{0:.2f}'.format(self.get_pressure()))\n\t\twpilib.SmartDashboard.putBoolean(\"Garbo?\", self.is_pbot)\n\n\t\tself.drive.log()\n\t\tself.elevator.log()\n\t\tself.intake.log()", "def event_log(self):\n pass", "def Log(self, msg):\n self.DBExecute(\"INSERT INTO Log (class, instance, event) VALUES (%s, %s, %s)\",\n self.__class__.__name__, self._instance, msg)\n print '%s/%s: %s' % (self.__class__.__name__, self._instance, msg)", "def test_retrieve_instances_schedule_state(self):\n pass", "def run(self):\n while True :\n try :\n instance_id = self.queue.get()\n db.hset(application_name,instance_id,1)\n except:\n pass\n finally:\n pass", "def log(self):\r\n return self._log", "def logs(self, **options):\n cmd = self.get_scalingo_prefix_cmd()\n cmd += \" logs -f\"\n for name, val in options.items():\n if len(name) == 1:\n cmd += f\" -{name}\"\n else:\n cmd += f\" --{name}\"\n if val:\n cmd += f\" {val}\"\n if self.env_name == \"local\":\n raise ValueError(\"No logs on local environment\")\n else:\n asyncio.run(self.async_run(cmd))", "def train_instance(self, epoch, dataset=None, check_point_interval=None, is_restore=False, with_tensorboard=True):\n\n if with_tensorboard:\n self.open_tensorboard()\n\n with tf.Session() as sess:\n saver = tf.train.Saver()\n save_path = os.path.join(self.instance.instance_path, 'check_point')\n check_point_path = os.path.join(save_path, 'instance.ckpt')\n if not os.path.exists(save_path):\n os.mkdir(save_path)\n self.log('make save dir')\n\n self.log('init global variables')\n sess.run(tf.global_variables_initializer())\n\n self.log('init summary_writer')\n summary_writer = tf.summary.FileWriter(self.instance.instance_summary_folder_path, sess.graph)\n\n if is_restore:\n self.log('restore check point')\n saver.restore(sess, check_point_path)\n\n batch_size = self.instance.batch_size\n iter_per_epoch = int(dataset.data_size / batch_size)\n self.log('total Epoch: %d, total iter: %d, iter per epoch: %d'\n % (epoch, epoch * iter_per_epoch, iter_per_epoch))\n\n iter_num, loss_val_D, loss_val_G = 0, 0, 0\n for epoch_ in range(epoch):\n for _ in range(iter_per_epoch):\n iter_num += 1\n self.instance.train_model(sess=sess, iter_num=iter_num, dataset=dataset)\n self.__visualizer_task(sess, iter_num, dataset)\n\n self.instance.write_summary(sess=sess, iter_num=iter_num, dataset=dataset,\n summary_writer=summary_writer)\n\n if iter_num % check_point_interval == 0:\n saver.save(sess, check_point_path)\n self.log(\"epoch %s end\" % (epoch_ + 1))\n self.log('train end')\n\n tf.reset_default_graph()\n self.log('reset default graph')\n\n if with_tensorboard:\n self.close_tensorboard()", "def get_full_log(self):\n return self._get_log('full')", "def __periodic_maintenance__(self):\n pass", "def snapshot(self, context, instance, image_id, update_task_state):\n raise NotImplementedError()", "def monitor_instance(self, instance_id):\r\n return self.monitor_instances([instance_id])", "def test_live_migration_src_check_volume_node_not_alive(self):\n\n instance_id = self._create_instance()\n i_ref = db.instance_get(self.context, instance_id)\n dic = {'instance_id': instance_id, 'size': 1}\n v_ref = db.volume_create(self.context, {'instance_id': instance_id,\n 'size': 1})\n t1 = utils.utcnow() - 
datetime.timedelta(1)\n dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume',\n 'topic': 'volume', 'report_count': 0}\n s_ref = db.service_create(self.context, dic)\n\n self.assertRaises(exception.VolumeServiceUnavailable,\n self.scheduler.driver.schedule_live_migration,\n self.context, instance_id, i_ref['host'])\n\n db.instance_destroy(self.context, instance_id)\n db.service_destroy(self.context, s_ref['id'])\n db.volume_destroy(self.context, v_ref['id'])", "def snapshot(self):\n pass", "def start_instance(InstanceId=None):\n pass", "def __enter__(self):\n try:\n run(['logger', 'BVT', 'starting', self.full_description()], \n host=self.dut, timeout=10)\n except SubprocessError:\n print 'INFO: unable to mark test log'\n if not self.record:\n return self\n if self.result_id is None:\n self.mdb = get_autotest()\n terms = {'test_case':self.description or 'to be determined',\n 'automation_user': getpwuid(getuid()).pw_gecos.split(',')[0],\n 'control_pid' : getpid(), 'start_time' : time(),\n 'development_mode' : 0,\n 'command_line':abbreviate(' '.join(sys.argv))}\n if self.dut:\n dutdoc = self.mdb.duts.find_one({'name':self.dut})\n self.dut_id = terms['dut'] = dutdoc['_id']\n terms['dut_name'] = dutdoc['name']\n if 'development_mode' in dutdoc:\n terms['development_mode'] = dutdoc['development_mode']\n self.result_id = self.mdb.results.save(terms)\n if self.job_id is not None:\n self.mdb.jobs.update({'_id':objectid.ObjectId(self.job_id)}, {'$set':{'results_id':self.result_id}})\n if self.build is None and self.dut:\n self.build = get_build(self.dut, timeout=10)\n self.mdb.results.update({'_id':self.result_id}, \n {'$set':{'build':self.build}})\n if self.dut:\n self.mdb.duts.update({'_id':terms['dut']}, {'$set': {\n 'build':self.build,\n 'control_command_line': abbreviate(' '.join(sys.argv)),\n 'result_id' : self.result_id}})\n if self.stdout_filter:\n self.record_queue = Queue()\n self.stream_process = Process(\n target=service_queue, \n args=[self.record_queue, self.result_id, \n self.dut, self.dut_id])\n self.stream_process.start()\n self.stdout_filter.add_callback(self, \n lambda *x: self.record_queue.put(x))\n\n if self.description:\n print 'HEADLINE: starting', self.full_description()\n get_track().updates.save({'result_id':self.result_id,\n 'action':'new result record'})\n return self", "def active():\n if env.get('active_instance'):\n print \"Active Instance: \" + env.get('active_instance')\n else:\n print \"No active instance\"", "def on_L2(self):\r\n self.log()", "def log_create(sender, instance, created, **kwargs):\n if created:\n changes = model_instance_diff(None, instance)\n\n log_entry = LogEntry.objects.log_create(\n instance,\n action=LogEntry.Action.CREATE,\n changes=json.dumps(changes),\n )\n log_created.send(\n sender=LogEntry,\n old_instance=None,\n new_instance=instance,\n log_instance=log_entry,\n )", "def audit(self):\n self.ping()", "def checkLogs():\n run(\"cat /etc/chariot/logs\")#not sure if this is cat-able", "def doRestore(self):\n self.logger.log(\"Begin to restore instance status...\")\n \n try:\n self.readConfigInfo()\n self.getUserInfo()\n \n # dump status to file\n cmd = ClusterCommand.getQueryStatusCmd(self.user, self.dbNodeInfo.id, self.__curStatusFile)\n (status, output) = commands.getstatusoutput(cmd)\n if (status != 0):\n self.logger.logExit(\"Query local instance status failed!Error: %s\" % output)\n \n bakDbStatus = DbClusterStatus()\n bakDbStatus.initFromFile(self.__bakStatusFile)\n bakNodeStatus = 
bakDbStatus.getDbNodeStatusById(self.dbNodeInfo.id)\n if (bakNodeStatus is None):\n self.logger.logExit(\"Get backup status of local node failed!\")\n \n curDbStatus = DbClusterStatus()\n curDbStatus.initFromFile(self.__curStatusFile)\n curNodeStatus = curDbStatus.getDbNodeStatusById(self.dbNodeInfo.id)\n if (curNodeStatus is None):\n self.logger.logExit(\"Get current status of local node failed!\")\n if (not curNodeStatus.isNodeHealthy()):\n self.logger.logExit(\"Current status of node is not healthy!\")\n \n # Compare the status and restore it\n bakInstances = bakNodeStatus.datanodes + bakNodeStatus.gtms\n for bakInst in bakInstances:\n curInst = curNodeStatus.getInstanceByDir(bakInst.datadir)\n if (curInst is None):\n self.logger.logExit(\"Get current status of instance failed!DataDir:%s\" % bakInst.datadir)\n \n if (bakInst.status == curInst.status):\n continue\n \n if (bakInst.status == DbClusterStatus.INSTANCE_STATUS_PRIMARY):\n self.__switchToPrimary(bakInst.datadir)\n elif (bakInst.status == DbClusterStatus.INSTANCE_STATUS_STANDBY):\n self.__switchToStandby(bakInst.datadir)\n \n except Exception, e:\n self.logger.logExit(str(e))\n \n self.logger.log(\"Restore instance status successfully.\")\n self.logger.closeLog()", "def test_live_migration_src_check_instance_not_running(self):\n\n instance_id = self._create_instance(power_state=power_state.NOSTATE)\n i_ref = db.instance_get(self.context, instance_id)\n\n try:\n self.scheduler.driver._live_migration_src_check(self.context,\n i_ref)\n except exception.Invalid, e:\n c = (e.message.find('is not running') > 0)\n\n self.assertTrue(c)\n db.instance_destroy(self.context, instance_id)", "def __init__(self, batch_size, log_steps):\n self.batch_size = batch_size\n super(TimeHistory, self).__init__()\n self.log_steps = log_steps\n\n # Logs start of step 0 then end of each step based on log_steps interval.\n self.timestamp_log = []", "def check_status():\n js = _get_jetstream_conn()\n i = js.compute.instances.get(session.attributes.get('instance_id'))\n if not i:\n return question(\"There was a problem. 
Please retry your command.\")\n\n status = i.state\n if session.attributes['status'] != status:\n msg = \"New instance status is {0}.\".format(status)\n if not session.attributes['public_ip'] and status == 'running':\n # Attach a floating IP to the instance\n fip = None\n fips = js.network.floating_ips()\n for ip in fips:\n if not ip.in_use():\n fip = ip\n if fip:\n i.add_floating_ip(fip.public_ip)\n session.attributes['public_ip'] = fip.public_ip\n else:\n msg = \"Instance status is {0}\".format(status)\n\n session.attributes['status'] = status\n\n if session.attributes['status'] != 'running':\n q = \"Would you like to check the status again?\"\n return question(msg + q).reprompt(q)\n else:\n card_content = 'Access your instance at http://{0}'.format(\n session.attributes.get('public_ip'))\n return statement(msg).simple_card(\n title=\"Instance {0} was launched.\".format(i.name),\n content=msg + card_content)", "def objectLogger(params):\n mode = params['mode']\n maxlen = params['maxlen']\n file_format = params['format']\n home_dir = params['storage_dir']\n server = Listener((params['ip'], int(params['port'])))\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n while True:\n conn = server.accept()\n while True:\n try:\n data = conn.recv()\n except EOFError:\n break\n if data:\n storage.append(data)\n else:\n storage.sync(id_generator())\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n conn.close()", "def execute(self):\n instances = self._get_active_instances()\n if not instances:\n print(\"No running instances.\")\n else:\n output = \"\\nInstanceId\\t\\tName\\n\\n\"\n for instance in instances:\n name = self._get_instance_name(instance)\n instance_id = instance['InstanceId']\n output += f\"{instance_id}\\t{name}\\n\"\n\n print(output)", "def _sync_instance_power_state(self, context, db_instance, vm_power_state,\n use_slave=False):\n\n # We re-query the DB to get the latest instance info to minimize\n # (not eliminate) race condition.\n db_instance.refresh(use_slave=use_slave)\n db_power_state = db_instance.power_state\n vm_state = db_instance.vm_state\n\n if self.host != db_instance.host:\n # on the sending end of cloud-cloud _sync_power_state\n # may have yielded to the greenthread performing a live\n # migration; this in turn has changed the resident-host\n # for the VM; However, the instance is still active, it\n # is just in the process of migrating to another host.\n # This implies that the cloud source must relinquish\n # control to the cloud destination.\n LOG.info(_LI(\"During the sync_power process the \"\n \"instance has moved from \"\n \"host %(src)s to host %(dst)s\"),\n {'src': db_instance.host,\n 'dst': self.host},\n instance=db_instance)\n return\n elif db_instance.task_state is not None:\n # on the receiving end of cloud-cloud, it could happen\n # that the DB instance already report the new resident\n # but the actual VM has not showed up on the hypervisor\n # yet. In this case, let's allow the loop to continue\n # and run the state sync in a later round\n LOG.info(_LI(\"During sync_power_state the instance has a \"\n \"pending task (%(task)s). 
Skip.\"),\n {'task': db_instance.task_state},\n instance=db_instance)\n return\n\n orig_db_power_state = db_power_state\n if vm_power_state != db_power_state:\n LOG.info(_LI('During _sync_instance_power_state the DB '\n 'power_state (%(db_power_state)s) does not match '\n 'the vm_power_state from the hypervisor '\n '(%(vm_power_state)s). Updating power_state in the '\n 'DB to match the hypervisor.'),\n {'db_power_state': db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n # power_state is always updated from hypervisor to db\n db_instance.power_state = vm_power_state\n db_instance.save()\n db_power_state = vm_power_state\n\n # Note(maoy): Now resolve the discrepancy between vm_state and\n # vm_power_state. We go through all possible vm_states.\n if vm_state in (vm_states.BUILDING,\n vm_states.RESCUED,\n vm_states.RESIZED,\n vm_states.SUSPENDED,\n vm_states.ERROR):\n # TODO(maoy): we ignore these vm_state for now.\n pass\n elif vm_state == vm_states.ACTIVE:\n # The only rational power state should be RUNNING\n if vm_power_state in (power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Instance shutdown by itself. Calling the \"\n \"stop API. Current vm_state: %(vm_state)s, \"\n \"current task_state: %(task_state)s, \"\n \"original DB power_state: %(db_power_state)s, \"\n \"current VM power_state: %(vm_power_state)s\"),\n {'vm_state': vm_state,\n 'task_state': db_instance.task_state,\n 'db_power_state': orig_db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n try:\n # Note(maoy): here we call the API instead of\n # brutally updating the vm_state in the database\n # to allow all the hooks and checks to be performed.\n if db_instance.shutdown_terminate:\n self.compute_api.delete(context, db_instance)\n else:\n self.compute_api.stop(context, db_instance)\n except Exception:\n # Note(maoy): there is no need to propagate the error\n # because the same power_state will be retrieved next\n # time and retried.\n # For example, there might be another task scheduled.\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_power_state == power_state.SUSPENDED:\n LOG.warning(_LW(\"Instance is suspended unexpectedly. Calling \"\n \"the stop API.\"), instance=db_instance)\n try:\n self.compute_api.stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_power_state == power_state.PAUSED:\n # Note(maoy): a VM may get into the paused state not only\n # because the user request via API calls, but also\n # due to (temporary) external instrumentations.\n # Before the virt layer can reliably report the reason,\n # we simply ignore the state discrepancy. In many cases,\n # the VM state will go back to running after the external\n # instrumentation is done. See bug 1097806 for details.\n LOG.warning(_LW(\"Instance is paused unexpectedly. Ignore.\"),\n instance=db_instance)\n elif vm_power_state == power_state.NOSTATE:\n # Occasionally, depending on the status of the hypervisor,\n # which could be restarting for example, an instance may\n # not be found. Therefore just log the condition.\n LOG.warning(_LW(\"Instance is unexpectedly not found. Ignore.\"),\n instance=db_instance)\n elif vm_state == vm_states.STOPPED:\n if vm_power_state not in (power_state.NOSTATE,\n power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Instance is not stopped. Calling \"\n \"the stop API. 
Current vm_state: %(vm_state)s,\"\n \" current task_state: %(task_state)s, \"\n \"original DB power_state: %(db_power_state)s, \"\n \"current VM power_state: %(vm_power_state)s\"),\n {'vm_state': vm_state,\n 'task_state': db_instance.task_state,\n 'db_power_state': orig_db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n try:\n # NOTE(russellb) Force the stop, because normally the\n # cloud API would not allow an attempt to stop a stopped\n # instance.\n self.compute_api.force_stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_state == vm_states.PAUSED:\n if vm_power_state in (power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Paused instance shutdown by itself. Calling \"\n \"the stop API.\"), instance=db_instance)\n try:\n self.compute_api.force_stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_state in (vm_states.SOFT_DELETED,\n vm_states.DELETED):\n if vm_power_state not in (power_state.NOSTATE,\n power_state.SHUTDOWN):\n # Note(maoy): this should be taken care of periodically in\n # _cleanup_running_deleted_instances().\n LOG.warning(_LW(\"Instance is not (soft-)deleted.\"),\n instance=db_instance)", "def save(self):\n if self._dirty:\n # key and timestamp\n data = {\n InstanceStates.INSTANCE_TABLE_NAME: self._service,\n InstanceStates.INSTANCE_TABLE_ACCOUNT_REGION: self._current_account_region,\n InstanceStates.INSTANCE_TABLE_TIMESTAMP: Decimal(time.time())\n }\n\n # store instance states as one column per instance\n for i in self._state_info:\n data[i] = self._state_info[i]\n\n # instances to purge\n if len(self._instances_to_purge) > 0:\n data[InstanceStates.INSTANCE_TABLE_PURGE] = self._instances_to_purge\n\n self.state_table.put_item_with_retries(Item=data)\n self._dirty = False", "def daemonize(self):\n raise NotImplementedError()", "def monitor(instance=\"default\"):\n global logger_ic\n while True:\n try:\n with open(\"{}/{}/.{}-bmc.pid\".format(\n config.infrasim_home, instance, instance), \"r\") as f:\n pid = f.readline().strip()\n if not os.path.exists(\"/proc/{}\".format(pid)):\n logger_ic.warning(\"Node {} vBMC {} is not running, \"\n \"ipmi-console is ready to quit\".\n format(instance, pid))\n break\n time.sleep(3)\n except IOError:\n logger_ic.warning(\"Node {} workspace is possibly destroyed, \"\n \"ipmi-console is ready to quit\".format(instance))\n break\n stop(instance)", "def __init__(self, logfile):\n\n self.logfile = logfile\n if self. 
logfile:\n self.file = open(logfile, \"w\")\n self.starttime = time.time()\n self.file.write(\"%.2f %s Starting log\\n\" % (time.time() - self.starttime, time.asctime()))", "def load(lb_id, backend='memory'):\n return b_api.fetch(backend).logbook_get(lb_id)", "def start_instance(self):\n instance_id = self._choose_among_stopped_instances()\n\n # Cancel\n if not instance_id:\n print 'Operation cancelled'\n return\n\n print '# Starting the instance \"%s\"' % instance_id\n if self.compute.start_instance(instance_id):\n print 'The instance has been started'\n else:\n print 'The instance could not be started'", "def _inst_run(self):\r\n self._inst_get_img_info_from_db()\r\n print(\"run method: \", time.ctime())\r\n th = threading.Timer(\r\n 10,\r\n self._inst_run\r\n )\r\n th.start()", "def __init__(self, instance):\n self.instance = instance\n self.runs = []", "def __on_backup_created(self, logger, *args):", "def create_volume(self, instance_id):\n user, instance = _get_user_and_instance(self.girder_client, instance_id)\n tale = self.girder_client.get('/tale/{taleId}'.format(**instance))\n\n self.job_manager.updateProgress(\n message='Creating volume', total=CREATE_VOLUME_STEP_TOTAL,\n current=1, forceFlush=True)\n\n vol_name = \"%s_%s_%s\" % (tale['_id'], user['login'], new_user(6))\n fs_sidecar = FSContainer.start_container(vol_name)\n payload = {\n \"mounts\": [\n {\n \"type\": \"data\",\n \"protocol\": \"girderfs\",\n \"location\": \"data\",\n },\n {\n \"type\": \"home\",\n \"protocol\": \"bind\",\n \"location\": \"home\",\n },\n {\n \"type\": \"workspace\",\n \"protocol\": \"bind\",\n \"location\": \"workspace\",\n },\n {\n \"type\": \"versions\",\n \"protocol\": \"girderfs\",\n \"location\": \"versions\",\n },\n {\n \"type\": \"runs\",\n \"protocol\": \"girderfs\",\n \"location\": \"runs\",\n },\n ],\n \"taleId\": tale[\"_id\"],\n \"userId\": user[\"_id\"],\n \"girderApiUrl\": GIRDER_API_URL,\n \"girderApiKey\": _get_api_key(self.girder_client),\n \"root\": vol_name,\n }\n FSContainer.mount(fs_sidecar, payload)\n self.job_manager.updateProgress(\n message='Volume created', total=CREATE_VOLUME_STEP_TOTAL,\n current=CREATE_VOLUME_STEP_TOTAL, forceFlush=True)\n print(\"WT Filesystem created successfully.\")\n\n cli = docker.from_env()\n return dict(\n nodeId=cli.info()['Swarm']['NodeID'],\n fscontainerId=fs_sidecar.id,\n volumeName=vol_name,\n instanceId=instance_id,\n taleId=tale[\"_id\"],\n )", "def test_get_instance_state(self):\r\n self.peer_grading.get_instance_state()", "def on_L1(self):\r\n self.log()", "def __init__(self):\n self._workload = None\n self._engine = Engine()", "def test_delete_instance_changes_power_state(self):\n instance = self._create_fake_instance_obj()\n self.compute._delete_instance(self.context, instance, [])\n self.assertEqual(power_state.NOSTATE, instance.power_state)", "def test_errors_on_log(self):\n mb = self.maria_backup\n self.assertFalse(mb.errors_on_log()) # at the moment, xtrabackup execution does not generate disk logs", "def log_kernel_launch(self, cmd: List[str]) -> None:\n pass", "def access_spot_instance_state() -> Response:\n retry_session = requests_retry_session()\n response = retry_session.get(INSTANCE_ACTION_URL)\n return response", "def check(key):\n instance = key.get()\n if not instance:\n logging.warning('Instance does not exist: %s', key)\n return\n\n if not instance.active_metadata_update:\n logging.warning('Instance active metadata operation unspecified: %s', key)\n return\n\n if not instance.active_metadata_update.url:\n 
logging.warning(\n 'Instance active metadata operation URL unspecified: %s', key)\n return\n\n now = utils.utcnow()\n result = net.json_request(\n instance.active_metadata_update.url, scopes=gce.AUTH_SCOPES)\n if result['status'] != 'DONE':\n return\n\n if result.get('error'):\n logging.warning(\n 'Instance metadata operation failed: %s\\n%s',\n key,\n json.dumps(result, indent=2),\n )\n metrics.instance_set_metadata_time.add(\n (now - instance.active_metadata_update.operation_ts).total_seconds(),\n fields={\n 'success': False,\n 'zone': instance.instance_group_manager.id(),\n },\n )\n metrics.send_machine_event('METADATA_UPDATE_FAILED', instance.hostname)\n reschedule_active_metadata_update(key, instance.active_metadata_update.url)\n metrics.send_machine_event('METADATA_UPDATE_READY', instance.hostname)\n else:\n metrics.instance_set_metadata_time.add(\n (now - instance.active_metadata_update.operation_ts).total_seconds(),\n fields={\n 'success': True,\n 'zone': instance.instance_group_manager.id(),\n },\n )\n metrics.send_machine_event('METADATA_UPDATE_SUCCEEDED', instance.hostname)\n clear_active_metadata_update(key, instance.active_metadata_update.url)", "def main():\n\n parser = init_parser()\n args = parser.parse_args()\n\n # Set up logging.\n level = logging.INFO\n if args.debug:\n level = logging.DEBUG\n logging.basicConfig(format='%(asctime)s %(levelname)s %(filename)s:' \\\n '%(lineno)s %(message)s ', level=level)\n logging.info(\"Logging started\")\n\n message = \"Backing up \"\n if args.source_code:\n message += \"source and \"\n message += \"data for: {0}\".format(args.app_id)\n logging.info(message)\n\n zk_connection_locations = appscale_info.get_zk_locations_string()\n zookeeper = zk.ZKTransaction(host=zk_connection_locations)\n db_info = appscale_info.get_db_info()\n table = db_info[':table']\n\n skip_list = args.skip\n if not skip_list:\n skip_list = []\n logging.info(\"Will skip the following kinds: {0}\".format(sorted(skip_list)))\n ds_backup = DatastoreBackup(args.app_id, zookeeper, table,\n source_code=args.source_code, skip_list=sorted(skip_list))\n try:\n ds_backup.run()\n finally:\n zookeeper.close()", "def __enable_log_streaming():\n # Enable BigQuery service for the project:\n run_command('gcloud services enable bigquery --project {}'.format(PROJECT_ID))\n\n # Create the BigQuery dataset to store the logs:\n # For details refer to https://cloud.google.com/bigquery/docs/datasets#bigquery-create-dataset-cli\n run_command('bq mk --data_location {} --description \\\"Cloud logging export.\\\" {}'\n .format(LOGS_LOCATION, LOGS_SINK_DATASET_ID), 'already exists')\n\n # Set up a log sink to the above-created BigQuery dataset:\n output_message = run_command('gcloud logging sinks create {} {} --project {} \\\n --log-filter=\\'resource.type=\\\"gcs_bucket\\\" OR resource.type=\\\"project\\\"\\''\n .format(LOGS_SINK_NAME, LOGS_SINK_DESTINATION, PROJECT_ID))\n\n # The service account that will be writing to BQ dataset is listed by the previous command.\n # After extracting the service account from the message, you need to give it BQ Writer role to that service account.\n __set_dataset_access( __find_service_account_in_message(output_message) )", "def pre_run_hook(self, instance, private_data_dir):\n instance.log_lifecycle(\"pre_run\")\n\n # Before task is started, ensure that job_event partitions exist\n create_partition(instance.event_class._meta.db_table, start=instance.created)", "def getLogs():", "def getLogs():", "def test_slf_basic():\n oldlogfile = get_logfile()\n with 
tempfile.TemporaryDirectory() as tmp:\n logfile = os.path.join(tmp, 'log.txt')\n assert ~os.path.exists(logfile)\n start_logfile(logfile)\n assert os.path.exists(logfile)\n set_logfile(oldlogfile)", "def test_metering_database(self):\n self.with_deletion = self.input.param(\"delete\", False)\n self.db_name = \"%s-testmetering\" % self.db_name\n # validate initial throughput is 5000/3 = 1666\n for bucket in self.cluster.buckets:\n print(bucket.servers)\n self.assertEqual(self.bucket_util.get_throttle_limit(bucket),\n self.bucket_throttling_limit)\n\n # validate create, update, delete stat\n for op_type in [\"create\", \"update\"]:\n if op_type == \"create\":\n self.load_data(create_start=0, create_end=self.num_items, create_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if op_type == \"update\":\n self.load_data(update_start=0, update_end=self.num_items, update_perc=100, mutated=1)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if self.with_deletion:\n self.log.info(\"performing delete operation\")\n self.load_data(delete_start=0, delete_end=self.num_items, delete_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)", "def test_instance_running(self) -> None:\n if self.prod_env:\n ec2_name = 'saints-xctf-server-prod-asg'\n else:\n ec2_name = 'saints-xctf-server-dev-asg'\n\n instances = self.get_ec2(ec2_name)\n self.assertTrue(len(instances) > 0)", "def start():\n logging.info(\"Execution Started\")", "def test_restart_statestore(self):\n # Verify two catalogd instances are created with one as active.\n catalogds = self.cluster.catalogds()\n assert(len(catalogds) == 2)\n catalogd_service_1 = catalogds[0].service\n catalogd_service_2 = catalogds[1].service\n assert(catalogd_service_1.get_metric_value(\"catalog-server.active-status\"))\n assert(not catalogd_service_2.get_metric_value(\"catalog-server.active-status\"))\n\n # Verify ports of the active catalogd of statestore and impalad are matching with\n # the catalog service port of the current active catalogd.\n self.__verify_statestore_active_catalogd_port(catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(0, catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(1, catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(2, catalogd_service_1)\n\n # Restart statestore. 
Verify one catalogd is assigned as active, the other is\n # assigned as standby.\n self.cluster.statestored.restart()\n wait_time_s = build_flavor_timeout(90, slow_build_timeout=180)\n self.cluster.statestored.service.wait_for_metric_value('statestore.live-backends',\n expected_value=5, timeout=wait_time_s)\n sleep_time_s = build_flavor_timeout(2, slow_build_timeout=5)\n sleep(sleep_time_s)\n assert(catalogd_service_1.get_metric_value(\"catalog-server.active-status\"))\n assert(not catalogd_service_2.get_metric_value(\"catalog-server.active-status\"))\n\n # Verify ports of the active catalogd of statestore and impalad are matching with\n # the catalog service port of the current active catalogd.\n self.__verify_statestore_active_catalogd_port(catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(0, catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(1, catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(2, catalogd_service_1)\n # Verify simple queries are ran successfully.\n self.__run_simple_queries()\n\n unexpected_msg = re.compile(\"Ignore the update of active catalogd since more recent \"\n \"update has been processed ([0-9]+ vs [0-9]+)\")\n self.assert_catalogd_log_contains(\"INFO\", unexpected_msg, expected_count=0)\n self.assert_impalad_log_contains(\"INFO\", unexpected_msg, expected_count=0)", "def snapshot(self):\n return self.journal.create_checkpoint()", "def clusterMonitor():\n node = os.environ['DIM_DNS_NODE']\n xml = XMLTaskList.TransformXmlToObjects()\n xml.load('../xml/TaskInventory.xml') # loads the Task Inventory\n xml.load('../xml/HLTD01.xml') # loads the Node List\n xml.load('../xml/HLTD02.xml') # loads the Node List\n xml.load('../xml/HLTD03.xml') # loads the Node List\n xml.load('../xml/HLTD04.xml') # loads the Node List\n xml.load('../xml/HLTD06.xml') # loads the Node List\n xml.load('../xml/HLTD07.xml') # loads the Node List\n xml.load('../xml/HLTD08.xml') # loads the Node List\n xml.load('../xml/HLTD09.xml') # loads the Node List\n xml.load('../xml/HLTD10.xml') # loads the Node List\n xml.load('../xml/HLTD11.xml') # loads the Node List\n xml.load('../xml/HLTE04.xml') # loads the Node List\n xml.load('../xml/HLTE06.xml') # loads the Node List\n xml.load('../xml/'+node.upper()+'.xml') # loads the Node List\n collector = ClusterCollector(xml)\n collector.startx()\n collector.run()", "def log(self, extra=None):\n\t\tself.stop()\n\t\treturn self._log(self.time(), extra)", "def export_getRunningInstancesHistory( self, timespan, bucketSize ):\n return gVirtualMachineDB.getRunningInstancesHistory( timespan, bucketSize )", "def time_step(self):\n\n self.reinitialize_backup_containers()\n\n super().time_step()\n\n self.make_a_backup_for_t()", "def _start(self, instance):\n try:\n # Attempt to start the VE.\n # NOTE: The VE will throw a warning that the hostname is invalid\n # if it isn't valid. This is logged in LOG.error and is not\n # an indication of failure.\n _, err = utils.execute('sudo', 'vzctl', 'start', instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Failed to start %d' % instance['id'])\n\n # Set instance state as RUNNING\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.RUNNING)\n return True" ]
[ "0.5816251", "0.55439824", "0.54192", "0.5350156", "0.53101283", "0.5288235", "0.5250576", "0.5207726", "0.5130923", "0.5095212", "0.50946444", "0.50885844", "0.5069727", "0.5060447", "0.5045108", "0.50422746", "0.5036676", "0.503287", "0.5031638", "0.50175893", "0.50110024", "0.5006715", "0.49987793", "0.4984645", "0.49733037", "0.49675646", "0.49428928", "0.49306312", "0.49295944", "0.49279422", "0.492351", "0.49171263", "0.49165663", "0.48896623", "0.4868515", "0.48584634", "0.48578468", "0.48570728", "0.4854869", "0.48531812", "0.4848399", "0.48482394", "0.484595", "0.48452473", "0.48421317", "0.48377466", "0.4837534", "0.48359197", "0.48298427", "0.4825254", "0.4817078", "0.48092136", "0.47922885", "0.47836322", "0.47752738", "0.47738013", "0.47737476", "0.4772084", "0.4769607", "0.47625557", "0.47573707", "0.4734552", "0.47326964", "0.472421", "0.4719711", "0.4718123", "0.4717977", "0.4716275", "0.4715142", "0.47145185", "0.4714278", "0.47141027", "0.4711673", "0.47113934", "0.47042722", "0.46972016", "0.4695865", "0.46914384", "0.46781656", "0.46761757", "0.46735793", "0.46683195", "0.4665803", "0.46647793", "0.46602386", "0.46590814", "0.46552354", "0.46527895", "0.46515787", "0.46515787", "0.46501642", "0.4649016", "0.46476138", "0.46474054", "0.46454194", "0.46426737", "0.4642541", "0.46421257", "0.46343124", "0.46333972", "0.46324918" ]
0.0
-1
The instance must be in the running state when you call this operation. This operation is applicable only to general-purpose local-disk and dedicated local-disk instances. You can call this operation up to 30 times per minute. To call this operation at a higher frequency, use a Logstore. For more information, see [Manage a Logstore](~~48990~~).
def modify_audit_log_filter( self, request: dds_20151201_models.ModifyAuditLogFilterRequest, ) -> dds_20151201_models.ModifyAuditLogFilterResponse: runtime = util_models.RuntimeOptions() return self.modify_audit_log_filter_with_options(request, runtime)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def monitorStore():\n # commented to use psutil system info system_info = systeminfo.get_all_info()\n\n system_info = nodeinfo.node_all()\n system_info ['monitored_timestamp'] = config.get_current_system_timestamp()\n\n # Attach sliver info to system info\n system_info.update(sliverinfo.collectAllDataAPI())\n\n s = shelve.open('log_shelf.db', writeback = True)\n\n while(1):\n try:\n try:\n if s.has_key('current_seq_number'):\n #Update current sequence number\n s['current_seq_number']+= 1\n current_seq = s['current_seq_number']\n else:\n current_seq = 1\n s['current_seq_number']= current_seq\n\n print(\"writing to file: \" + str(current_seq))\n\n # print(\"writing to file\" + str(system_info))\n s[str(current_seq)]= system_info\n\n\n finally:\n s.close()\n break\n\n except OSError:\n # In some research devices, the underlying dbm has a bug which needs to be handled explicitly\n print(\"Exception caught while handling shelve file!! OS Error: file not found. Trying again in 1 second\")\n time.sleep(1)\n continue\n\n delete_seen_entries()", "def log(msg):\n\n print('datastore: %s' % msg)", "def detail_running_instance(self):\n\n instance_id = self._choose_among_running_instances()\n\n # Exit option\n if not instance_id:\n print 'Operation cancelled'\n return\n\n # Print the details\n print '# Details of the \"%s\" instance' % instance_id\n self.compute.detail_running_instance(instance_id)", "def on_start(self):\r\n self.log()", "def log_runtime(label, mean_time, std, instances):\n pass", "def on_sync(self):\r\n self.log()", "def snapshot(self, instance, name):\n # TODO(imsplitbit): Need to implement vzdump\n pass", "def internal_event(self):\n # log activity\n self.log_activity(LogEntry(\n sys_time=time(),\n logical_time=self.logical_clock,\n action=\"work\"\n ))", "def store_stream(data: DataStream):\n if data:\n try:\n c1 = datetime.now()\n CC.save_datastream(data,\"datastream\")\n e1 = datetime.now()\n CC.save_datastream_to_influxdb(data)\n i1 = datetime.now()\n print(\"Cassandra Time: \", e1-c1, \" Influx Time: \",i1-e1, \" Batch size: \",len(data.data))\n except:\n cc_log()", "def doBackup(self):\n self.logger.log(\"Begin to backup instance status...\")\n \n try:\n self.readConfigInfo()\n self.getUserInfo()\n \n # dump status to file\n cmd = ClusterCommand.getQueryStatusCmd(self.user, self.dbNodeInfo.id, self.__bakStatusFile)\n (status, output) = commands.getstatusoutput(cmd)\n if (status != 0):\n self.logger.logExit(\"Query local instance status failed!Error: %s\" % output)\n except Exception, e:\n self.logger.logExit(str(e))\n \n self.logger.log(\"Backup instance status successfully.\")\n self.logger.closeLog()", "def monitor(self):", "def execute(self):\n\n c = self.config\n regions = dict((x.name, x) for x in boto.ec2.regions(\n aws_access_key_id=c['access_key'],\n aws_secret_access_key=c['secret_access_key']))\n connect = regions[c['region']].connect(\n aws_access_key_id=c['access_key'],\n aws_secret_access_key=c['secret_access_key'])\n volume = connect.get_all_volumes([c['volume_id']])[0]\n volume.create_snapshot(c['volume_id'])\n snapshots = {}\n for x in connect.get_all_snapshots():\n if x.volume_id == c['volume_id']:\n snapshots.update({x.id: x.start_time})\n snapshots = sorted(snapshots.items(), key=lambda (k, v): (v, k), reverse=True)\n for i in range(int(c['keep']), len(snapshots)):\n connect.delete_snapshot(snapshots[i][0])", "def logmem(cls, tag):\n\n cls.logger.info(\"----%s----\", str(tag))\n mem = psutil.virtual_memory()\n cls.logger.info(\"total:%s M\", mem.total / 
1024 / 1024)\n cls.logger.info(\"available:%s M\", mem.available / 1024 / 1024)\n cls.logger.info(\"used:%s M\", mem.used / 1024 / 1024)\n cls.logger.info(\"free:%s M\", mem.free / 1024 / 1024)\n cls.logger.info(\"active:%s M\", mem.active / 1024 / 1024)\n cls.logger.info(\"inactive:%s M\", mem.inactive / 1024 / 1024)\n cls.logger.info(\"buffers:%s M\", mem.buffers / 1024 / 1024)\n cls.logger.info(\"cached:%s M\", mem.cached / 1024 / 1024)\n cls.logger.info(\"shared:%s M\", mem.shared / 1024 / 1024)", "def run_instance():\n ami_id = \"ami-04876f29fd3a5e8ba\" # AMI Id\n instance_type = \"t2.micro\" # Instance Type\n tag_specs = [\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': 'Name',\n 'Value': 'BoB10@ProductDev.sdk.8084'\n }\n ]\n }\n ]\n\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Run an instance\n\n instances = ec2_resource.create_instances(ImageId=ami_id, InstanceType=instance_type,\n MaxCount=1, MinCount=1, KeyName='BoB10@ProductDev.8084',\n TagSpecifications=tag_specs, \n SecurityGroupIds=['sg-06b757b4bb272d98f'],\n UserData=assemble_userdata().as_string())\n Instance_id = instances[0].id\n print('\\nInstance Id: ' + Instance_id)\n print('Image Id: ' + instances[0].image_id)\n print('Instance Type: ' + instances[0].instance_type)\n print('State: ' + instances[0].state['Name'])\n return Instance_id", "def main(logger):\n logger.info('Snapshot Reaper starting')\n keep_running = True\n while keep_running:\n logger.info(\"Connecting to vCenter {} as {}\".format(const.INF_VCENTER_SERVER, const.INF_VCENTER_USER))\n with vCenter(host=const.INF_VCENTER_SERVER, user=const.INF_VCENTER_USER,\n password=const.INF_VCENTER_PASSWORD) as vcenter:\n try:\n start_loop = time.time()\n reap_snapshots(vcenter, logger)\n except Exception as doh:\n logger.exception(doh)\n keep_running = False\n else:\n ran_for = int(time.time() - start_loop)\n logger.debug('Took {} seconds to check all snapshots'.format(ran_for))\n loop_delta = LOOP_INTERVAL - ran_for\n sleep_for = max(0, loop_delta)\n time.sleep(sleep_for)", "def run(self):\n while True :\n try:\n instance_id = os.getenv(\"CF_INSTANCE_INDEX\")\n mydict = db.hgetall(application_name)\n if instance_id not in mydict :\n self.queue.put(instance_id)\n except :\n pass\n finally:\n pass", "def running(self):\n pass", "def post_run_hook(self, instance, status):\n instance.log_lifecycle(\"post_run\")", "def _create_vm(self):\n self._create_instance_in_the_db()\n self.type_data = db.instance_type_get_by_name(None, 'm1.large')\n self.conn.spawn(self.context, self.instance, self.network_info)\n self._check_vm_record()", "def log_model_without_starting_new_run():\n with TempDir() as tmp:\n artifact_path = \"model\"\n local_path = tmp.path(\"model\")\n mlflow_model = Model(artifact_path=artifact_path, run_id=_AUTOLOG_RUN_ID)\n save_model_kwargs = dict(\n tf_saved_model_dir=serialized.decode(\"utf-8\"),\n tf_meta_graph_tags=[tag_constants.SERVING],\n tf_signature_def_key=\"predict\",\n )\n save_model(path=local_path, mlflow_model=mlflow_model, **save_model_kwargs)\n client = MlflowClient()\n client.log_artifacts(_AUTOLOG_RUN_ID, local_path, artifact_path)\n\n try:\n client._record_logged_model(_AUTOLOG_RUN_ID, mlflow_model)\n except MlflowException:\n # We need to swallow all mlflow exceptions to maintain backwards\n # compatibility with older tracking servers. 
Only print out a warning\n # for now.\n _logger.warning(\n _LOG_MODEL_METADATA_WARNING_TEMPLATE,\n get_artifact_uri(_AUTOLOG_RUN_ID),\n )", "def store(**kwargs):\r\n stored = AppLog(**kwargs)\r\n DBSession.add(stored)", "def do_workload(self):\n pass", "def _sync_log_event(self):\n # sync only after first run and if not currently running\n if self.auto_sync and not self._running and self._has_run:\n self.sync_exp(upload_resources=False)", "def init_log():\n log_op = LogOp()\n log_op.config_log(logging.DEBUG)", "def logManager(self):\n time.sleep(0.1)\n while True:\n try:\n time.sleep(0.2)\n data = self.logQ.get(block=False)\n except Queue.Empty:\n pass\n else:\n try:\n self.log_lock.acquire() \n self.log_file.logEntry(data)\n time.sleep(0.1)\n self.log_lock.release()\n except:\n print '*Unable to write to log file*'", "def on_up(self):\r\n self.log()", "def start_monitoring(self):\n pass", "def store(status, message, **kwargs):\r\n LogRecord(status, message, **kwargs)", "def __init__(self, memory=4000):\n self.session = boto3.Session()\n self.batch_client = self.session.client(\"batch\")\n self.logs_client = self.session.client(\"logs\")\n self.memory = memory", "def test_compute_persistence(perfectModelEnsemble_initialized_control):\n perfectModelEnsemble_initialized_control._compute_persistence(metric=\"acc\")", "def _log_process(ip_or_url,\n username,\n password,\n database,\n series_name,\n port=8086,\n polling_interval=1,\n retention_duration=MEASUREMENTS_RETENTION_DURATION,\n **tags):\n logger = _logger()\n logger.info('Logging GPU to Database {}'.format(ip_or_url))\n\n kwargs = {'series_name': series_name,\n 'polling_interval': polling_interval,\n 'retention_duration': retention_duration}\n kwargs.update(tags)\n p = Process(target=_start_logger_process,\n args=(ip_or_url,\n port,\n username,\n password,\n database),\n kwargs=kwargs)\n p.start()\n yield p\n p.terminate()\n p.join()", "def GetLogs(self):\n raise NotImplementedError()", "def log_state(self):\n rospy.loginfo(\"STATE: %s [%s]\" %(self.__class__.__name__, 15 - self.ros_node.get_time()))", "def on_left(self):\r\n self.log()", "def show_instance(name, call=None):\n if call != \"action\":\n raise SaltCloudSystemExit(\n \"The show_instance action must be called with -a or --action.\"\n )\n\n nodes = list_nodes_full()\n __utils__[\"cloud.cache_node\"](nodes[name], _get_active_provider_name(), __opts__)\n return nodes[name]", "def service( self ):\n\n self.alive = time.time()", "def list(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n print(vmrun.listSnapshots())", "def log(self):\n\n\t\t# Only every 1/10 second (or so) to avoid flooding networktables\n\t\tif not self.log_timer.running or not self.log_timer.hasPeriodPassed(self.log_timer_delay):\n\t\t\treturn\n\n\t\twpilib.SmartDashboard.putString('Pressure', '{0:.2f}'.format(self.get_pressure()))\n\t\twpilib.SmartDashboard.putBoolean(\"Garbo?\", self.is_pbot)\n\n\t\tself.drive.log()\n\t\tself.elevator.log()\n\t\tself.intake.log()", "def test_update_instances_schedule_state(self):\n pass", "def _sync(self):\n if self._conf.start_optime:\n # TODO optimize\n log.info(\"locating oplog, it will take a while\")\n oplog_start = self._conf.start_optime\n doc = self._src.client()['local']['oplog.rs'].find_one({'ts': {'$gte': oplog_start}})\n if not doc:\n log.error('no oplogs newer than the specified oplog')\n return\n oplog_start = doc['ts']\n 
log.info('start timestamp is %s actually' % oplog_start)\n self._last_optime = oplog_start\n self._sync_oplog(oplog_start)\n else:\n oplog_start = get_optime(self._src.client())\n if not oplog_start:\n log.error('get oplog_start failed, terminate')\n sys.exit(1)\n self._last_optime = oplog_start\n self._sync_databases()\n if self._optime_logger:\n self._optime_logger.write(oplog_start)\n log.info('first %s' % oplog_start)\n self._sync_oplog(oplog_start)", "def event_log(self):\n pass", "def Log(self, msg):\n self.DBExecute(\"INSERT INTO Log (class, instance, event) VALUES (%s, %s, %s)\",\n self.__class__.__name__, self._instance, msg)\n print '%s/%s: %s' % (self.__class__.__name__, self._instance, msg)", "def log(self):\r\n return self._log", "def test_retrieve_instances_schedule_state(self):\n pass", "def run(self):\n while True :\n try :\n instance_id = self.queue.get()\n db.hset(application_name,instance_id,1)\n except:\n pass\n finally:\n pass", "def logs(self, **options):\n cmd = self.get_scalingo_prefix_cmd()\n cmd += \" logs -f\"\n for name, val in options.items():\n if len(name) == 1:\n cmd += f\" -{name}\"\n else:\n cmd += f\" --{name}\"\n if val:\n cmd += f\" {val}\"\n if self.env_name == \"local\":\n raise ValueError(\"No logs on local environment\")\n else:\n asyncio.run(self.async_run(cmd))", "def get_full_log(self):\n return self._get_log('full')", "def train_instance(self, epoch, dataset=None, check_point_interval=None, is_restore=False, with_tensorboard=True):\n\n if with_tensorboard:\n self.open_tensorboard()\n\n with tf.Session() as sess:\n saver = tf.train.Saver()\n save_path = os.path.join(self.instance.instance_path, 'check_point')\n check_point_path = os.path.join(save_path, 'instance.ckpt')\n if not os.path.exists(save_path):\n os.mkdir(save_path)\n self.log('make save dir')\n\n self.log('init global variables')\n sess.run(tf.global_variables_initializer())\n\n self.log('init summary_writer')\n summary_writer = tf.summary.FileWriter(self.instance.instance_summary_folder_path, sess.graph)\n\n if is_restore:\n self.log('restore check point')\n saver.restore(sess, check_point_path)\n\n batch_size = self.instance.batch_size\n iter_per_epoch = int(dataset.data_size / batch_size)\n self.log('total Epoch: %d, total iter: %d, iter per epoch: %d'\n % (epoch, epoch * iter_per_epoch, iter_per_epoch))\n\n iter_num, loss_val_D, loss_val_G = 0, 0, 0\n for epoch_ in range(epoch):\n for _ in range(iter_per_epoch):\n iter_num += 1\n self.instance.train_model(sess=sess, iter_num=iter_num, dataset=dataset)\n self.__visualizer_task(sess, iter_num, dataset)\n\n self.instance.write_summary(sess=sess, iter_num=iter_num, dataset=dataset,\n summary_writer=summary_writer)\n\n if iter_num % check_point_interval == 0:\n saver.save(sess, check_point_path)\n self.log(\"epoch %s end\" % (epoch_ + 1))\n self.log('train end')\n\n tf.reset_default_graph()\n self.log('reset default graph')\n\n if with_tensorboard:\n self.close_tensorboard()", "def __periodic_maintenance__(self):\n pass", "def snapshot(self, context, instance, image_id, update_task_state):\n raise NotImplementedError()", "def monitor_instance(self, instance_id):\r\n return self.monitor_instances([instance_id])", "def test_live_migration_src_check_volume_node_not_alive(self):\n\n instance_id = self._create_instance()\n i_ref = db.instance_get(self.context, instance_id)\n dic = {'instance_id': instance_id, 'size': 1}\n v_ref = db.volume_create(self.context, {'instance_id': instance_id,\n 'size': 1})\n t1 = utils.utcnow() - 
datetime.timedelta(1)\n dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume',\n 'topic': 'volume', 'report_count': 0}\n s_ref = db.service_create(self.context, dic)\n\n self.assertRaises(exception.VolumeServiceUnavailable,\n self.scheduler.driver.schedule_live_migration,\n self.context, instance_id, i_ref['host'])\n\n db.instance_destroy(self.context, instance_id)\n db.service_destroy(self.context, s_ref['id'])\n db.volume_destroy(self.context, v_ref['id'])", "def snapshot(self):\n pass", "def start_instance(InstanceId=None):\n pass", "def on_L2(self):\r\n self.log()", "def __enter__(self):\n try:\n run(['logger', 'BVT', 'starting', self.full_description()], \n host=self.dut, timeout=10)\n except SubprocessError:\n print 'INFO: unable to mark test log'\n if not self.record:\n return self\n if self.result_id is None:\n self.mdb = get_autotest()\n terms = {'test_case':self.description or 'to be determined',\n 'automation_user': getpwuid(getuid()).pw_gecos.split(',')[0],\n 'control_pid' : getpid(), 'start_time' : time(),\n 'development_mode' : 0,\n 'command_line':abbreviate(' '.join(sys.argv))}\n if self.dut:\n dutdoc = self.mdb.duts.find_one({'name':self.dut})\n self.dut_id = terms['dut'] = dutdoc['_id']\n terms['dut_name'] = dutdoc['name']\n if 'development_mode' in dutdoc:\n terms['development_mode'] = dutdoc['development_mode']\n self.result_id = self.mdb.results.save(terms)\n if self.job_id is not None:\n self.mdb.jobs.update({'_id':objectid.ObjectId(self.job_id)}, {'$set':{'results_id':self.result_id}})\n if self.build is None and self.dut:\n self.build = get_build(self.dut, timeout=10)\n self.mdb.results.update({'_id':self.result_id}, \n {'$set':{'build':self.build}})\n if self.dut:\n self.mdb.duts.update({'_id':terms['dut']}, {'$set': {\n 'build':self.build,\n 'control_command_line': abbreviate(' '.join(sys.argv)),\n 'result_id' : self.result_id}})\n if self.stdout_filter:\n self.record_queue = Queue()\n self.stream_process = Process(\n target=service_queue, \n args=[self.record_queue, self.result_id, \n self.dut, self.dut_id])\n self.stream_process.start()\n self.stdout_filter.add_callback(self, \n lambda *x: self.record_queue.put(x))\n\n if self.description:\n print 'HEADLINE: starting', self.full_description()\n get_track().updates.save({'result_id':self.result_id,\n 'action':'new result record'})\n return self", "def log_create(sender, instance, created, **kwargs):\n if created:\n changes = model_instance_diff(None, instance)\n\n log_entry = LogEntry.objects.log_create(\n instance,\n action=LogEntry.Action.CREATE,\n changes=json.dumps(changes),\n )\n log_created.send(\n sender=LogEntry,\n old_instance=None,\n new_instance=instance,\n log_instance=log_entry,\n )", "def active():\n if env.get('active_instance'):\n print \"Active Instance: \" + env.get('active_instance')\n else:\n print \"No active instance\"", "def audit(self):\n self.ping()", "def checkLogs():\n run(\"cat /etc/chariot/logs\")#not sure if this is cat-able", "def doRestore(self):\n self.logger.log(\"Begin to restore instance status...\")\n \n try:\n self.readConfigInfo()\n self.getUserInfo()\n \n # dump status to file\n cmd = ClusterCommand.getQueryStatusCmd(self.user, self.dbNodeInfo.id, self.__curStatusFile)\n (status, output) = commands.getstatusoutput(cmd)\n if (status != 0):\n self.logger.logExit(\"Query local instance status failed!Error: %s\" % output)\n \n bakDbStatus = DbClusterStatus()\n bakDbStatus.initFromFile(self.__bakStatusFile)\n bakNodeStatus = 
bakDbStatus.getDbNodeStatusById(self.dbNodeInfo.id)\n if (bakNodeStatus is None):\n self.logger.logExit(\"Get backup status of local node failed!\")\n \n curDbStatus = DbClusterStatus()\n curDbStatus.initFromFile(self.__curStatusFile)\n curNodeStatus = curDbStatus.getDbNodeStatusById(self.dbNodeInfo.id)\n if (curNodeStatus is None):\n self.logger.logExit(\"Get current status of local node failed!\")\n if (not curNodeStatus.isNodeHealthy()):\n self.logger.logExit(\"Current status of node is not healthy!\")\n \n # Compare the status and restore it\n bakInstances = bakNodeStatus.datanodes + bakNodeStatus.gtms\n for bakInst in bakInstances:\n curInst = curNodeStatus.getInstanceByDir(bakInst.datadir)\n if (curInst is None):\n self.logger.logExit(\"Get current status of instance failed!DataDir:%s\" % bakInst.datadir)\n \n if (bakInst.status == curInst.status):\n continue\n \n if (bakInst.status == DbClusterStatus.INSTANCE_STATUS_PRIMARY):\n self.__switchToPrimary(bakInst.datadir)\n elif (bakInst.status == DbClusterStatus.INSTANCE_STATUS_STANDBY):\n self.__switchToStandby(bakInst.datadir)\n \n except Exception, e:\n self.logger.logExit(str(e))\n \n self.logger.log(\"Restore instance status successfully.\")\n self.logger.closeLog()", "def test_live_migration_src_check_instance_not_running(self):\n\n instance_id = self._create_instance(power_state=power_state.NOSTATE)\n i_ref = db.instance_get(self.context, instance_id)\n\n try:\n self.scheduler.driver._live_migration_src_check(self.context,\n i_ref)\n except exception.Invalid, e:\n c = (e.message.find('is not running') > 0)\n\n self.assertTrue(c)\n db.instance_destroy(self.context, instance_id)", "def __init__(self, batch_size, log_steps):\n self.batch_size = batch_size\n super(TimeHistory, self).__init__()\n self.log_steps = log_steps\n\n # Logs start of step 0 then end of each step based on log_steps interval.\n self.timestamp_log = []", "def check_status():\n js = _get_jetstream_conn()\n i = js.compute.instances.get(session.attributes.get('instance_id'))\n if not i:\n return question(\"There was a problem. 
Please retry your command.\")\n\n status = i.state\n if session.attributes['status'] != status:\n msg = \"New instance status is {0}.\".format(status)\n if not session.attributes['public_ip'] and status == 'running':\n # Attach a floating IP to the instance\n fip = None\n fips = js.network.floating_ips()\n for ip in fips:\n if not ip.in_use():\n fip = ip\n if fip:\n i.add_floating_ip(fip.public_ip)\n session.attributes['public_ip'] = fip.public_ip\n else:\n msg = \"Instance status is {0}\".format(status)\n\n session.attributes['status'] = status\n\n if session.attributes['status'] != 'running':\n q = \"Would you like to check the status again?\"\n return question(msg + q).reprompt(q)\n else:\n card_content = 'Access your instance at http://{0}'.format(\n session.attributes.get('public_ip'))\n return statement(msg).simple_card(\n title=\"Instance {0} was launched.\".format(i.name),\n content=msg + card_content)", "def objectLogger(params):\n mode = params['mode']\n maxlen = params['maxlen']\n file_format = params['format']\n home_dir = params['storage_dir']\n server = Listener((params['ip'], int(params['port'])))\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n while True:\n conn = server.accept()\n while True:\n try:\n data = conn.recv()\n except EOFError:\n break\n if data:\n storage.append(data)\n else:\n storage.sync(id_generator())\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n conn.close()", "def execute(self):\n instances = self._get_active_instances()\n if not instances:\n print(\"No running instances.\")\n else:\n output = \"\\nInstanceId\\t\\tName\\n\\n\"\n for instance in instances:\n name = self._get_instance_name(instance)\n instance_id = instance['InstanceId']\n output += f\"{instance_id}\\t{name}\\n\"\n\n print(output)", "def _sync_instance_power_state(self, context, db_instance, vm_power_state,\n use_slave=False):\n\n # We re-query the DB to get the latest instance info to minimize\n # (not eliminate) race condition.\n db_instance.refresh(use_slave=use_slave)\n db_power_state = db_instance.power_state\n vm_state = db_instance.vm_state\n\n if self.host != db_instance.host:\n # on the sending end of cloud-cloud _sync_power_state\n # may have yielded to the greenthread performing a live\n # migration; this in turn has changed the resident-host\n # for the VM; However, the instance is still active, it\n # is just in the process of migrating to another host.\n # This implies that the cloud source must relinquish\n # control to the cloud destination.\n LOG.info(_LI(\"During the sync_power process the \"\n \"instance has moved from \"\n \"host %(src)s to host %(dst)s\"),\n {'src': db_instance.host,\n 'dst': self.host},\n instance=db_instance)\n return\n elif db_instance.task_state is not None:\n # on the receiving end of cloud-cloud, it could happen\n # that the DB instance already report the new resident\n # but the actual VM has not showed up on the hypervisor\n # yet. In this case, let's allow the loop to continue\n # and run the state sync in a later round\n LOG.info(_LI(\"During sync_power_state the instance has a \"\n \"pending task (%(task)s). 
Skip.\"),\n {'task': db_instance.task_state},\n instance=db_instance)\n return\n\n orig_db_power_state = db_power_state\n if vm_power_state != db_power_state:\n LOG.info(_LI('During _sync_instance_power_state the DB '\n 'power_state (%(db_power_state)s) does not match '\n 'the vm_power_state from the hypervisor '\n '(%(vm_power_state)s). Updating power_state in the '\n 'DB to match the hypervisor.'),\n {'db_power_state': db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n # power_state is always updated from hypervisor to db\n db_instance.power_state = vm_power_state\n db_instance.save()\n db_power_state = vm_power_state\n\n # Note(maoy): Now resolve the discrepancy between vm_state and\n # vm_power_state. We go through all possible vm_states.\n if vm_state in (vm_states.BUILDING,\n vm_states.RESCUED,\n vm_states.RESIZED,\n vm_states.SUSPENDED,\n vm_states.ERROR):\n # TODO(maoy): we ignore these vm_state for now.\n pass\n elif vm_state == vm_states.ACTIVE:\n # The only rational power state should be RUNNING\n if vm_power_state in (power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Instance shutdown by itself. Calling the \"\n \"stop API. Current vm_state: %(vm_state)s, \"\n \"current task_state: %(task_state)s, \"\n \"original DB power_state: %(db_power_state)s, \"\n \"current VM power_state: %(vm_power_state)s\"),\n {'vm_state': vm_state,\n 'task_state': db_instance.task_state,\n 'db_power_state': orig_db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n try:\n # Note(maoy): here we call the API instead of\n # brutally updating the vm_state in the database\n # to allow all the hooks and checks to be performed.\n if db_instance.shutdown_terminate:\n self.compute_api.delete(context, db_instance)\n else:\n self.compute_api.stop(context, db_instance)\n except Exception:\n # Note(maoy): there is no need to propagate the error\n # because the same power_state will be retrieved next\n # time and retried.\n # For example, there might be another task scheduled.\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_power_state == power_state.SUSPENDED:\n LOG.warning(_LW(\"Instance is suspended unexpectedly. Calling \"\n \"the stop API.\"), instance=db_instance)\n try:\n self.compute_api.stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_power_state == power_state.PAUSED:\n # Note(maoy): a VM may get into the paused state not only\n # because the user request via API calls, but also\n # due to (temporary) external instrumentations.\n # Before the virt layer can reliably report the reason,\n # we simply ignore the state discrepancy. In many cases,\n # the VM state will go back to running after the external\n # instrumentation is done. See bug 1097806 for details.\n LOG.warning(_LW(\"Instance is paused unexpectedly. Ignore.\"),\n instance=db_instance)\n elif vm_power_state == power_state.NOSTATE:\n # Occasionally, depending on the status of the hypervisor,\n # which could be restarting for example, an instance may\n # not be found. Therefore just log the condition.\n LOG.warning(_LW(\"Instance is unexpectedly not found. Ignore.\"),\n instance=db_instance)\n elif vm_state == vm_states.STOPPED:\n if vm_power_state not in (power_state.NOSTATE,\n power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Instance is not stopped. Calling \"\n \"the stop API. 
Current vm_state: %(vm_state)s,\"\n \" current task_state: %(task_state)s, \"\n \"original DB power_state: %(db_power_state)s, \"\n \"current VM power_state: %(vm_power_state)s\"),\n {'vm_state': vm_state,\n 'task_state': db_instance.task_state,\n 'db_power_state': orig_db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n try:\n # NOTE(russellb) Force the stop, because normally the\n # cloud API would not allow an attempt to stop a stopped\n # instance.\n self.compute_api.force_stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_state == vm_states.PAUSED:\n if vm_power_state in (power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Paused instance shutdown by itself. Calling \"\n \"the stop API.\"), instance=db_instance)\n try:\n self.compute_api.force_stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_state in (vm_states.SOFT_DELETED,\n vm_states.DELETED):\n if vm_power_state not in (power_state.NOSTATE,\n power_state.SHUTDOWN):\n # Note(maoy): this should be taken care of periodically in\n # _cleanup_running_deleted_instances().\n LOG.warning(_LW(\"Instance is not (soft-)deleted.\"),\n instance=db_instance)", "def load(lb_id, backend='memory'):\n return b_api.fetch(backend).logbook_get(lb_id)", "def __init__(self, logfile):\n\n self.logfile = logfile\n if self. logfile:\n self.file = open(logfile, \"w\")\n self.starttime = time.time()\n self.file.write(\"%.2f %s Starting log\\n\" % (time.time() - self.starttime, time.asctime()))", "def daemonize(self):\n raise NotImplementedError()", "def monitor(instance=\"default\"):\n global logger_ic\n while True:\n try:\n with open(\"{}/{}/.{}-bmc.pid\".format(\n config.infrasim_home, instance, instance), \"r\") as f:\n pid = f.readline().strip()\n if not os.path.exists(\"/proc/{}\".format(pid)):\n logger_ic.warning(\"Node {} vBMC {} is not running, \"\n \"ipmi-console is ready to quit\".\n format(instance, pid))\n break\n time.sleep(3)\n except IOError:\n logger_ic.warning(\"Node {} workspace is possibly destroyed, \"\n \"ipmi-console is ready to quit\".format(instance))\n break\n stop(instance)", "def save(self):\n if self._dirty:\n # key and timestamp\n data = {\n InstanceStates.INSTANCE_TABLE_NAME: self._service,\n InstanceStates.INSTANCE_TABLE_ACCOUNT_REGION: self._current_account_region,\n InstanceStates.INSTANCE_TABLE_TIMESTAMP: Decimal(time.time())\n }\n\n # store instance states as one column per instance\n for i in self._state_info:\n data[i] = self._state_info[i]\n\n # instances to purge\n if len(self._instances_to_purge) > 0:\n data[InstanceStates.INSTANCE_TABLE_PURGE] = self._instances_to_purge\n\n self.state_table.put_item_with_retries(Item=data)\n self._dirty = False", "def start_instance(self):\n instance_id = self._choose_among_stopped_instances()\n\n # Cancel\n if not instance_id:\n print 'Operation cancelled'\n return\n\n print '# Starting the instance \"%s\"' % instance_id\n if self.compute.start_instance(instance_id):\n print 'The instance has been started'\n else:\n print 'The instance could not be started'", "def _inst_run(self):\r\n self._inst_get_img_info_from_db()\r\n print(\"run method: \", time.ctime())\r\n th = threading.Timer(\r\n 10,\r\n self._inst_run\r\n )\r\n th.start()", "def __init__(self, instance):\n self.instance = instance\n self.runs = []", "def 
__on_backup_created(self, logger, *args):", "def create_volume(self, instance_id):\n user, instance = _get_user_and_instance(self.girder_client, instance_id)\n tale = self.girder_client.get('/tale/{taleId}'.format(**instance))\n\n self.job_manager.updateProgress(\n message='Creating volume', total=CREATE_VOLUME_STEP_TOTAL,\n current=1, forceFlush=True)\n\n vol_name = \"%s_%s_%s\" % (tale['_id'], user['login'], new_user(6))\n fs_sidecar = FSContainer.start_container(vol_name)\n payload = {\n \"mounts\": [\n {\n \"type\": \"data\",\n \"protocol\": \"girderfs\",\n \"location\": \"data\",\n },\n {\n \"type\": \"home\",\n \"protocol\": \"bind\",\n \"location\": \"home\",\n },\n {\n \"type\": \"workspace\",\n \"protocol\": \"bind\",\n \"location\": \"workspace\",\n },\n {\n \"type\": \"versions\",\n \"protocol\": \"girderfs\",\n \"location\": \"versions\",\n },\n {\n \"type\": \"runs\",\n \"protocol\": \"girderfs\",\n \"location\": \"runs\",\n },\n ],\n \"taleId\": tale[\"_id\"],\n \"userId\": user[\"_id\"],\n \"girderApiUrl\": GIRDER_API_URL,\n \"girderApiKey\": _get_api_key(self.girder_client),\n \"root\": vol_name,\n }\n FSContainer.mount(fs_sidecar, payload)\n self.job_manager.updateProgress(\n message='Volume created', total=CREATE_VOLUME_STEP_TOTAL,\n current=CREATE_VOLUME_STEP_TOTAL, forceFlush=True)\n print(\"WT Filesystem created successfully.\")\n\n cli = docker.from_env()\n return dict(\n nodeId=cli.info()['Swarm']['NodeID'],\n fscontainerId=fs_sidecar.id,\n volumeName=vol_name,\n instanceId=instance_id,\n taleId=tale[\"_id\"],\n )", "def test_get_instance_state(self):\r\n self.peer_grading.get_instance_state()", "def on_L1(self):\r\n self.log()", "def __init__(self):\n self._workload = None\n self._engine = Engine()", "def test_delete_instance_changes_power_state(self):\n instance = self._create_fake_instance_obj()\n self.compute._delete_instance(self.context, instance, [])\n self.assertEqual(power_state.NOSTATE, instance.power_state)", "def test_errors_on_log(self):\n mb = self.maria_backup\n self.assertFalse(mb.errors_on_log()) # at the moment, xtrabackup execution does not generate disk logs", "def log_kernel_launch(self, cmd: List[str]) -> None:\n pass", "def access_spot_instance_state() -> Response:\n retry_session = requests_retry_session()\n response = retry_session.get(INSTANCE_ACTION_URL)\n return response", "def main():\n\n parser = init_parser()\n args = parser.parse_args()\n\n # Set up logging.\n level = logging.INFO\n if args.debug:\n level = logging.DEBUG\n logging.basicConfig(format='%(asctime)s %(levelname)s %(filename)s:' \\\n '%(lineno)s %(message)s ', level=level)\n logging.info(\"Logging started\")\n\n message = \"Backing up \"\n if args.source_code:\n message += \"source and \"\n message += \"data for: {0}\".format(args.app_id)\n logging.info(message)\n\n zk_connection_locations = appscale_info.get_zk_locations_string()\n zookeeper = zk.ZKTransaction(host=zk_connection_locations)\n db_info = appscale_info.get_db_info()\n table = db_info[':table']\n\n skip_list = args.skip\n if not skip_list:\n skip_list = []\n logging.info(\"Will skip the following kinds: {0}\".format(sorted(skip_list)))\n ds_backup = DatastoreBackup(args.app_id, zookeeper, table,\n source_code=args.source_code, skip_list=sorted(skip_list))\n try:\n ds_backup.run()\n finally:\n zookeeper.close()", "def __enable_log_streaming():\n # Enable BigQuery service for the project:\n run_command('gcloud services enable bigquery --project {}'.format(PROJECT_ID))\n\n # Create the BigQuery dataset to store 
the logs:\n # For details refer to https://cloud.google.com/bigquery/docs/datasets#bigquery-create-dataset-cli\n run_command('bq mk --data_location {} --description \\\"Cloud logging export.\\\" {}'\n .format(LOGS_LOCATION, LOGS_SINK_DATASET_ID), 'already exists')\n\n # Set up a log sink to the above-created BigQuery dataset:\n output_message = run_command('gcloud logging sinks create {} {} --project {} \\\n --log-filter=\\'resource.type=\\\"gcs_bucket\\\" OR resource.type=\\\"project\\\"\\''\n .format(LOGS_SINK_NAME, LOGS_SINK_DESTINATION, PROJECT_ID))\n\n # The service account that will be writing to BQ dataset is listed by the previous command.\n # After extracting the service account from the message, you need to give it BQ Writer role to that service account.\n __set_dataset_access( __find_service_account_in_message(output_message) )", "def check(key):\n instance = key.get()\n if not instance:\n logging.warning('Instance does not exist: %s', key)\n return\n\n if not instance.active_metadata_update:\n logging.warning('Instance active metadata operation unspecified: %s', key)\n return\n\n if not instance.active_metadata_update.url:\n logging.warning(\n 'Instance active metadata operation URL unspecified: %s', key)\n return\n\n now = utils.utcnow()\n result = net.json_request(\n instance.active_metadata_update.url, scopes=gce.AUTH_SCOPES)\n if result['status'] != 'DONE':\n return\n\n if result.get('error'):\n logging.warning(\n 'Instance metadata operation failed: %s\\n%s',\n key,\n json.dumps(result, indent=2),\n )\n metrics.instance_set_metadata_time.add(\n (now - instance.active_metadata_update.operation_ts).total_seconds(),\n fields={\n 'success': False,\n 'zone': instance.instance_group_manager.id(),\n },\n )\n metrics.send_machine_event('METADATA_UPDATE_FAILED', instance.hostname)\n reschedule_active_metadata_update(key, instance.active_metadata_update.url)\n metrics.send_machine_event('METADATA_UPDATE_READY', instance.hostname)\n else:\n metrics.instance_set_metadata_time.add(\n (now - instance.active_metadata_update.operation_ts).total_seconds(),\n fields={\n 'success': True,\n 'zone': instance.instance_group_manager.id(),\n },\n )\n metrics.send_machine_event('METADATA_UPDATE_SUCCEEDED', instance.hostname)\n clear_active_metadata_update(key, instance.active_metadata_update.url)", "def getLogs():", "def getLogs():", "def pre_run_hook(self, instance, private_data_dir):\n instance.log_lifecycle(\"pre_run\")\n\n # Before task is started, ensure that job_event partitions exist\n create_partition(instance.event_class._meta.db_table, start=instance.created)", "def test_slf_basic():\n oldlogfile = get_logfile()\n with tempfile.TemporaryDirectory() as tmp:\n logfile = os.path.join(tmp, 'log.txt')\n assert ~os.path.exists(logfile)\n start_logfile(logfile)\n assert os.path.exists(logfile)\n set_logfile(oldlogfile)", "def start():\n logging.info(\"Execution Started\")", "def test_metering_database(self):\n self.with_deletion = self.input.param(\"delete\", False)\n self.db_name = \"%s-testmetering\" % self.db_name\n # validate initial throughput is 5000/3 = 1666\n for bucket in self.cluster.buckets:\n print(bucket.servers)\n self.assertEqual(self.bucket_util.get_throttle_limit(bucket),\n self.bucket_throttling_limit)\n\n # validate create, update, delete stat\n for op_type in [\"create\", \"update\"]:\n if op_type == \"create\":\n self.load_data(create_start=0, create_end=self.num_items, create_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, 
self.cluster.buckets)\n if op_type == \"update\":\n self.load_data(update_start=0, update_end=self.num_items, update_perc=100, mutated=1)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if self.with_deletion:\n self.log.info(\"performing delete operation\")\n self.load_data(delete_start=0, delete_end=self.num_items, delete_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)", "def test_instance_running(self) -> None:\n if self.prod_env:\n ec2_name = 'saints-xctf-server-prod-asg'\n else:\n ec2_name = 'saints-xctf-server-dev-asg'\n\n instances = self.get_ec2(ec2_name)\n self.assertTrue(len(instances) > 0)", "def log(self, extra=None):\n\t\tself.stop()\n\t\treturn self._log(self.time(), extra)", "def snapshot(self):\n return self.journal.create_checkpoint()", "def test_restart_statestore(self):\n # Verify two catalogd instances are created with one as active.\n catalogds = self.cluster.catalogds()\n assert(len(catalogds) == 2)\n catalogd_service_1 = catalogds[0].service\n catalogd_service_2 = catalogds[1].service\n assert(catalogd_service_1.get_metric_value(\"catalog-server.active-status\"))\n assert(not catalogd_service_2.get_metric_value(\"catalog-server.active-status\"))\n\n # Verify ports of the active catalogd of statestore and impalad are matching with\n # the catalog service port of the current active catalogd.\n self.__verify_statestore_active_catalogd_port(catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(0, catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(1, catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(2, catalogd_service_1)\n\n # Restart statestore. Verify one catalogd is assigned as active, the other is\n # assigned as standby.\n self.cluster.statestored.restart()\n wait_time_s = build_flavor_timeout(90, slow_build_timeout=180)\n self.cluster.statestored.service.wait_for_metric_value('statestore.live-backends',\n expected_value=5, timeout=wait_time_s)\n sleep_time_s = build_flavor_timeout(2, slow_build_timeout=5)\n sleep(sleep_time_s)\n assert(catalogd_service_1.get_metric_value(\"catalog-server.active-status\"))\n assert(not catalogd_service_2.get_metric_value(\"catalog-server.active-status\"))\n\n # Verify ports of the active catalogd of statestore and impalad are matching with\n # the catalog service port of the current active catalogd.\n self.__verify_statestore_active_catalogd_port(catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(0, catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(1, catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(2, catalogd_service_1)\n # Verify simple queries are ran successfully.\n self.__run_simple_queries()\n\n unexpected_msg = re.compile(\"Ignore the update of active catalogd since more recent \"\n \"update has been processed ([0-9]+ vs [0-9]+)\")\n self.assert_catalogd_log_contains(\"INFO\", unexpected_msg, expected_count=0)\n self.assert_impalad_log_contains(\"INFO\", unexpected_msg, expected_count=0)", "def clusterMonitor():\n node = os.environ['DIM_DNS_NODE']\n xml = XMLTaskList.TransformXmlToObjects()\n xml.load('../xml/TaskInventory.xml') # loads the Task Inventory\n xml.load('../xml/HLTD01.xml') # loads the Node List\n xml.load('../xml/HLTD02.xml') # loads the Node List\n xml.load('../xml/HLTD03.xml') # loads the Node List\n xml.load('../xml/HLTD04.xml') # loads the Node List\n xml.load('../xml/HLTD06.xml') # loads the Node List\n 
xml.load('../xml/HLTD07.xml') # loads the Node List\n xml.load('../xml/HLTD08.xml') # loads the Node List\n xml.load('../xml/HLTD09.xml') # loads the Node List\n xml.load('../xml/HLTD10.xml') # loads the Node List\n xml.load('../xml/HLTD11.xml') # loads the Node List\n xml.load('../xml/HLTE04.xml') # loads the Node List\n xml.load('../xml/HLTE06.xml') # loads the Node List\n xml.load('../xml/'+node.upper()+'.xml') # loads the Node List\n collector = ClusterCollector(xml)\n collector.startx()\n collector.run()", "def export_getRunningInstancesHistory( self, timespan, bucketSize ):\n return gVirtualMachineDB.getRunningInstancesHistory( timespan, bucketSize )", "def time_step(self):\n\n self.reinitialize_backup_containers()\n\n super().time_step()\n\n self.make_a_backup_for_t()", "def _start(self, instance):\n try:\n # Attempt to start the VE.\n # NOTE: The VE will throw a warning that the hostname is invalid\n # if it isn't valid. This is logged in LOG.error and is not\n # an indication of failure.\n _, err = utils.execute('sudo', 'vzctl', 'start', instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Failed to start %d' % instance['id'])\n\n # Set instance state as RUNNING\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.RUNNING)\n return True" ]
[ "0.58156765", "0.55455977", "0.5419773", "0.5353307", "0.5311246", "0.52902937", "0.52496463", "0.5209651", "0.51289505", "0.50947905", "0.50946975", "0.50858665", "0.50722444", "0.50575197", "0.5044451", "0.504037", "0.5036916", "0.50341827", "0.50292313", "0.5020206", "0.50123", "0.50047606", "0.50003886", "0.49877083", "0.49759492", "0.49706113", "0.49431816", "0.49305305", "0.4928458", "0.49267578", "0.4924654", "0.49205193", "0.49178112", "0.48929366", "0.48657364", "0.48570013", "0.48568472", "0.48557425", "0.48555914", "0.48550963", "0.48510417", "0.48502585", "0.48456535", "0.484387", "0.48432833", "0.48412123", "0.48391682", "0.48361674", "0.48290518", "0.48243877", "0.48170596", "0.48079622", "0.47919264", "0.47827956", "0.4777095", "0.4776649", "0.47736618", "0.47730917", "0.47705632", "0.47653532", "0.4756454", "0.4734139", "0.4733906", "0.47226155", "0.4719467", "0.47176373", "0.47172096", "0.47166345", "0.47164068", "0.47163415", "0.47153816", "0.4714649", "0.4710761", "0.47106117", "0.47034428", "0.4698475", "0.4693774", "0.4690239", "0.46810684", "0.46735573", "0.46723154", "0.4671075", "0.46692687", "0.46627858", "0.46587133", "0.46579242", "0.4657704", "0.46552715", "0.46552715", "0.4653389", "0.46524897", "0.46486965", "0.46463934", "0.46462476", "0.46450648", "0.46427557", "0.46424696", "0.4640919", "0.46350932", "0.46321175", "0.46319675" ]
0.0
-1
The instance must be in the running state when you call this operation. This operation is applicable only to general-purpose local disk and dedicated local disk instances. You can call this operation up to 30 times per minute. To call this operation at a higher frequency, use a Logstore. For more information, see [Manage a Logstore](~~48990~~).
async def modify_audit_log_filter_async(
    self,
    request: dds_20151201_models.ModifyAuditLogFilterRequest,
) -> dds_20151201_models.ModifyAuditLogFilterResponse:
    runtime = util_models.RuntimeOptions()
    return await self.modify_audit_log_filter_with_options_async(request, runtime)
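As an illustration only: a minimal sketch of how the async method above might be invoked. The import paths, client construction, and request field names (dbinstance_id, filter) are assumptions inferred from the conventions of this generated SDK, not values taken from this row; substitute your own instance ID, region, and credentials.

import asyncio

from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_dds20151201.client import Client
from alibabacloud_tea_openapi import models as open_api_models


async def main():
    # Assumed client setup; credentials and region are placeholders.
    config = open_api_models.Config(
        access_key_id='<access-key-id>',
        access_key_secret='<access-key-secret>',
        region_id='cn-hangzhou',
    )
    client = Client(config)
    # Assumed request fields: dbinstance_id and filter follow the
    # attribute-naming pattern used elsewhere in this SDK.
    request = dds_20151201_models.ModifyAuditLogFilterRequest(
        dbinstance_id='dds-bpxxxxxxxxxxxxxx',
        filter='admin,slow,query',
    )
    response = await client.modify_audit_log_filter_async(request)
    print(response.body)


asyncio.run(main())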
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def monitorStore():\n # commented to use psutil system info system_info = systeminfo.get_all_info()\n\n system_info = nodeinfo.node_all()\n system_info ['monitored_timestamp'] = config.get_current_system_timestamp()\n\n # Attach sliver info to system info\n system_info.update(sliverinfo.collectAllDataAPI())\n\n s = shelve.open('log_shelf.db', writeback = True)\n\n while(1):\n try:\n try:\n if s.has_key('current_seq_number'):\n #Update current sequence number\n s['current_seq_number']+= 1\n current_seq = s['current_seq_number']\n else:\n current_seq = 1\n s['current_seq_number']= current_seq\n\n print(\"writing to file: \" + str(current_seq))\n\n # print(\"writing to file\" + str(system_info))\n s[str(current_seq)]= system_info\n\n\n finally:\n s.close()\n break\n\n except OSError:\n # In some research devices, the underlying dbm has a bug which needs to be handled explicitly\n print(\"Exception caught while handling shelve file!! OS Error: file not found. Trying again in 1 second\")\n time.sleep(1)\n continue\n\n delete_seen_entries()", "def log(msg):\n\n print('datastore: %s' % msg)", "def detail_running_instance(self):\n\n instance_id = self._choose_among_running_instances()\n\n # Exit option\n if not instance_id:\n print 'Operation cancelled'\n return\n\n # Print the details\n print '# Details of the \"%s\" instance' % instance_id\n self.compute.detail_running_instance(instance_id)", "def on_start(self):\r\n self.log()", "def log_runtime(label, mean_time, std, instances):\n pass", "def on_sync(self):\r\n self.log()", "def snapshot(self, instance, name):\n # TODO(imsplitbit): Need to implement vzdump\n pass", "def internal_event(self):\n # log activity\n self.log_activity(LogEntry(\n sys_time=time(),\n logical_time=self.logical_clock,\n action=\"work\"\n ))", "def store_stream(data: DataStream):\n if data:\n try:\n c1 = datetime.now()\n CC.save_datastream(data,\"datastream\")\n e1 = datetime.now()\n CC.save_datastream_to_influxdb(data)\n i1 = datetime.now()\n print(\"Cassandra Time: \", e1-c1, \" Influx Time: \",i1-e1, \" Batch size: \",len(data.data))\n except:\n cc_log()", "def monitor(self):", "def doBackup(self):\n self.logger.log(\"Begin to backup instance status...\")\n \n try:\n self.readConfigInfo()\n self.getUserInfo()\n \n # dump status to file\n cmd = ClusterCommand.getQueryStatusCmd(self.user, self.dbNodeInfo.id, self.__bakStatusFile)\n (status, output) = commands.getstatusoutput(cmd)\n if (status != 0):\n self.logger.logExit(\"Query local instance status failed!Error: %s\" % output)\n except Exception, e:\n self.logger.logExit(str(e))\n \n self.logger.log(\"Backup instance status successfully.\")\n self.logger.closeLog()", "def execute(self):\n\n c = self.config\n regions = dict((x.name, x) for x in boto.ec2.regions(\n aws_access_key_id=c['access_key'],\n aws_secret_access_key=c['secret_access_key']))\n connect = regions[c['region']].connect(\n aws_access_key_id=c['access_key'],\n aws_secret_access_key=c['secret_access_key'])\n volume = connect.get_all_volumes([c['volume_id']])[0]\n volume.create_snapshot(c['volume_id'])\n snapshots = {}\n for x in connect.get_all_snapshots():\n if x.volume_id == c['volume_id']:\n snapshots.update({x.id: x.start_time})\n snapshots = sorted(snapshots.items(), key=lambda (k, v): (v, k), reverse=True)\n for i in range(int(c['keep']), len(snapshots)):\n connect.delete_snapshot(snapshots[i][0])", "def logmem(cls, tag):\n\n cls.logger.info(\"----%s----\", str(tag))\n mem = psutil.virtual_memory()\n cls.logger.info(\"total:%s M\", mem.total / 
1024 / 1024)\n cls.logger.info(\"available:%s M\", mem.available / 1024 / 1024)\n cls.logger.info(\"used:%s M\", mem.used / 1024 / 1024)\n cls.logger.info(\"free:%s M\", mem.free / 1024 / 1024)\n cls.logger.info(\"active:%s M\", mem.active / 1024 / 1024)\n cls.logger.info(\"inactive:%s M\", mem.inactive / 1024 / 1024)\n cls.logger.info(\"buffers:%s M\", mem.buffers / 1024 / 1024)\n cls.logger.info(\"cached:%s M\", mem.cached / 1024 / 1024)\n cls.logger.info(\"shared:%s M\", mem.shared / 1024 / 1024)", "def run_instance():\n ami_id = \"ami-04876f29fd3a5e8ba\" # AMI Id\n instance_type = \"t2.micro\" # Instance Type\n tag_specs = [\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': 'Name',\n 'Value': 'BoB10@ProductDev.sdk.8084'\n }\n ]\n }\n ]\n\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Run an instance\n\n instances = ec2_resource.create_instances(ImageId=ami_id, InstanceType=instance_type,\n MaxCount=1, MinCount=1, KeyName='BoB10@ProductDev.8084',\n TagSpecifications=tag_specs, \n SecurityGroupIds=['sg-06b757b4bb272d98f'],\n UserData=assemble_userdata().as_string())\n Instance_id = instances[0].id\n print('\\nInstance Id: ' + Instance_id)\n print('Image Id: ' + instances[0].image_id)\n print('Instance Type: ' + instances[0].instance_type)\n print('State: ' + instances[0].state['Name'])\n return Instance_id", "def main(logger):\n logger.info('Snapshot Reaper starting')\n keep_running = True\n while keep_running:\n logger.info(\"Connecting to vCenter {} as {}\".format(const.INF_VCENTER_SERVER, const.INF_VCENTER_USER))\n with vCenter(host=const.INF_VCENTER_SERVER, user=const.INF_VCENTER_USER,\n password=const.INF_VCENTER_PASSWORD) as vcenter:\n try:\n start_loop = time.time()\n reap_snapshots(vcenter, logger)\n except Exception as doh:\n logger.exception(doh)\n keep_running = False\n else:\n ran_for = int(time.time() - start_loop)\n logger.debug('Took {} seconds to check all snapshots'.format(ran_for))\n loop_delta = LOOP_INTERVAL - ran_for\n sleep_for = max(0, loop_delta)\n time.sleep(sleep_for)", "def run(self):\n while True :\n try:\n instance_id = os.getenv(\"CF_INSTANCE_INDEX\")\n mydict = db.hgetall(application_name)\n if instance_id not in mydict :\n self.queue.put(instance_id)\n except :\n pass\n finally:\n pass", "def running(self):\n pass", "def post_run_hook(self, instance, status):\n instance.log_lifecycle(\"post_run\")", "def _create_vm(self):\n self._create_instance_in_the_db()\n self.type_data = db.instance_type_get_by_name(None, 'm1.large')\n self.conn.spawn(self.context, self.instance, self.network_info)\n self._check_vm_record()", "def log_model_without_starting_new_run():\n with TempDir() as tmp:\n artifact_path = \"model\"\n local_path = tmp.path(\"model\")\n mlflow_model = Model(artifact_path=artifact_path, run_id=_AUTOLOG_RUN_ID)\n save_model_kwargs = dict(\n tf_saved_model_dir=serialized.decode(\"utf-8\"),\n tf_meta_graph_tags=[tag_constants.SERVING],\n tf_signature_def_key=\"predict\",\n )\n save_model(path=local_path, mlflow_model=mlflow_model, **save_model_kwargs)\n client = MlflowClient()\n client.log_artifacts(_AUTOLOG_RUN_ID, local_path, artifact_path)\n\n try:\n client._record_logged_model(_AUTOLOG_RUN_ID, mlflow_model)\n except MlflowException:\n # We need to swallow all mlflow exceptions to maintain backwards\n # compatibility with older tracking servers. 
Only print out a warning\n # for now.\n _logger.warning(\n _LOG_MODEL_METADATA_WARNING_TEMPLATE,\n get_artifact_uri(_AUTOLOG_RUN_ID),\n )", "def store(**kwargs):\r\n stored = AppLog(**kwargs)\r\n DBSession.add(stored)", "def do_workload(self):\n pass", "def _sync_log_event(self):\n # sync only after first run and if not currently running\n if self.auto_sync and not self._running and self._has_run:\n self.sync_exp(upload_resources=False)", "def init_log():\n log_op = LogOp()\n log_op.config_log(logging.DEBUG)", "def logManager(self):\n time.sleep(0.1)\n while True:\n try:\n time.sleep(0.2)\n data = self.logQ.get(block=False)\n except Queue.Empty:\n pass\n else:\n try:\n self.log_lock.acquire() \n self.log_file.logEntry(data)\n time.sleep(0.1)\n self.log_lock.release()\n except:\n print '*Unable to write to log file*'", "def on_up(self):\r\n self.log()", "def start_monitoring(self):\n pass", "def __init__(self, memory=4000):\n self.session = boto3.Session()\n self.batch_client = self.session.client(\"batch\")\n self.logs_client = self.session.client(\"logs\")\n self.memory = memory", "def test_compute_persistence(perfectModelEnsemble_initialized_control):\n perfectModelEnsemble_initialized_control._compute_persistence(metric=\"acc\")", "def store(status, message, **kwargs):\r\n LogRecord(status, message, **kwargs)", "def _log_process(ip_or_url,\n username,\n password,\n database,\n series_name,\n port=8086,\n polling_interval=1,\n retention_duration=MEASUREMENTS_RETENTION_DURATION,\n **tags):\n logger = _logger()\n logger.info('Logging GPU to Database {}'.format(ip_or_url))\n\n kwargs = {'series_name': series_name,\n 'polling_interval': polling_interval,\n 'retention_duration': retention_duration}\n kwargs.update(tags)\n p = Process(target=_start_logger_process,\n args=(ip_or_url,\n port,\n username,\n password,\n database),\n kwargs=kwargs)\n p.start()\n yield p\n p.terminate()\n p.join()", "def log_state(self):\n rospy.loginfo(\"STATE: %s [%s]\" %(self.__class__.__name__, 15 - self.ros_node.get_time()))", "def GetLogs(self):\n raise NotImplementedError()", "def on_left(self):\r\n self.log()", "def show_instance(name, call=None):\n if call != \"action\":\n raise SaltCloudSystemExit(\n \"The show_instance action must be called with -a or --action.\"\n )\n\n nodes = list_nodes_full()\n __utils__[\"cloud.cache_node\"](nodes[name], _get_active_provider_name(), __opts__)\n return nodes[name]", "def test_update_instances_schedule_state(self):\n pass", "def service( self ):\n\n self.alive = time.time()", "def list(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n print(vmrun.listSnapshots())", "def _sync(self):\n if self._conf.start_optime:\n # TODO optimize\n log.info(\"locating oplog, it will take a while\")\n oplog_start = self._conf.start_optime\n doc = self._src.client()['local']['oplog.rs'].find_one({'ts': {'$gte': oplog_start}})\n if not doc:\n log.error('no oplogs newer than the specified oplog')\n return\n oplog_start = doc['ts']\n log.info('start timestamp is %s actually' % oplog_start)\n self._last_optime = oplog_start\n self._sync_oplog(oplog_start)\n else:\n oplog_start = get_optime(self._src.client())\n if not oplog_start:\n log.error('get oplog_start failed, terminate')\n sys.exit(1)\n self._last_optime = oplog_start\n self._sync_databases()\n if self._optime_logger:\n self._optime_logger.write(oplog_start)\n log.info('first %s' % oplog_start)\n 
self._sync_oplog(oplog_start)", "def log(self):\n\n\t\t# Only every 1/10 second (or so) to avoid flooding networktables\n\t\tif not self.log_timer.running or not self.log_timer.hasPeriodPassed(self.log_timer_delay):\n\t\t\treturn\n\n\t\twpilib.SmartDashboard.putString('Pressure', '{0:.2f}'.format(self.get_pressure()))\n\t\twpilib.SmartDashboard.putBoolean(\"Garbo?\", self.is_pbot)\n\n\t\tself.drive.log()\n\t\tself.elevator.log()\n\t\tself.intake.log()", "def event_log(self):\n pass", "def test_retrieve_instances_schedule_state(self):\n pass", "def Log(self, msg):\n self.DBExecute(\"INSERT INTO Log (class, instance, event) VALUES (%s, %s, %s)\",\n self.__class__.__name__, self._instance, msg)\n print '%s/%s: %s' % (self.__class__.__name__, self._instance, msg)", "def run(self):\n while True :\n try :\n instance_id = self.queue.get()\n db.hset(application_name,instance_id,1)\n except:\n pass\n finally:\n pass", "def log(self):\r\n return self._log", "def train_instance(self, epoch, dataset=None, check_point_interval=None, is_restore=False, with_tensorboard=True):\n\n if with_tensorboard:\n self.open_tensorboard()\n\n with tf.Session() as sess:\n saver = tf.train.Saver()\n save_path = os.path.join(self.instance.instance_path, 'check_point')\n check_point_path = os.path.join(save_path, 'instance.ckpt')\n if not os.path.exists(save_path):\n os.mkdir(save_path)\n self.log('make save dir')\n\n self.log('init global variables')\n sess.run(tf.global_variables_initializer())\n\n self.log('init summary_writer')\n summary_writer = tf.summary.FileWriter(self.instance.instance_summary_folder_path, sess.graph)\n\n if is_restore:\n self.log('restore check point')\n saver.restore(sess, check_point_path)\n\n batch_size = self.instance.batch_size\n iter_per_epoch = int(dataset.data_size / batch_size)\n self.log('total Epoch: %d, total iter: %d, iter per epoch: %d'\n % (epoch, epoch * iter_per_epoch, iter_per_epoch))\n\n iter_num, loss_val_D, loss_val_G = 0, 0, 0\n for epoch_ in range(epoch):\n for _ in range(iter_per_epoch):\n iter_num += 1\n self.instance.train_model(sess=sess, iter_num=iter_num, dataset=dataset)\n self.__visualizer_task(sess, iter_num, dataset)\n\n self.instance.write_summary(sess=sess, iter_num=iter_num, dataset=dataset,\n summary_writer=summary_writer)\n\n if iter_num % check_point_interval == 0:\n saver.save(sess, check_point_path)\n self.log(\"epoch %s end\" % (epoch_ + 1))\n self.log('train end')\n\n tf.reset_default_graph()\n self.log('reset default graph')\n\n if with_tensorboard:\n self.close_tensorboard()", "def logs(self, **options):\n cmd = self.get_scalingo_prefix_cmd()\n cmd += \" logs -f\"\n for name, val in options.items():\n if len(name) == 1:\n cmd += f\" -{name}\"\n else:\n cmd += f\" --{name}\"\n if val:\n cmd += f\" {val}\"\n if self.env_name == \"local\":\n raise ValueError(\"No logs on local environment\")\n else:\n asyncio.run(self.async_run(cmd))", "def get_full_log(self):\n return self._get_log('full')", "def __periodic_maintenance__(self):\n pass", "def snapshot(self, context, instance, image_id, update_task_state):\n raise NotImplementedError()", "def monitor_instance(self, instance_id):\r\n return self.monitor_instances([instance_id])", "def test_live_migration_src_check_volume_node_not_alive(self):\n\n instance_id = self._create_instance()\n i_ref = db.instance_get(self.context, instance_id)\n dic = {'instance_id': instance_id, 'size': 1}\n v_ref = db.volume_create(self.context, {'instance_id': instance_id,\n 'size': 1})\n t1 = utils.utcnow() - 
datetime.timedelta(1)\n dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume',\n 'topic': 'volume', 'report_count': 0}\n s_ref = db.service_create(self.context, dic)\n\n self.assertRaises(exception.VolumeServiceUnavailable,\n self.scheduler.driver.schedule_live_migration,\n self.context, instance_id, i_ref['host'])\n\n db.instance_destroy(self.context, instance_id)\n db.service_destroy(self.context, s_ref['id'])\n db.volume_destroy(self.context, v_ref['id'])", "def snapshot(self):\n pass", "def start_instance(InstanceId=None):\n pass", "def __enter__(self):\n try:\n run(['logger', 'BVT', 'starting', self.full_description()], \n host=self.dut, timeout=10)\n except SubprocessError:\n print 'INFO: unable to mark test log'\n if not self.record:\n return self\n if self.result_id is None:\n self.mdb = get_autotest()\n terms = {'test_case':self.description or 'to be determined',\n 'automation_user': getpwuid(getuid()).pw_gecos.split(',')[0],\n 'control_pid' : getpid(), 'start_time' : time(),\n 'development_mode' : 0,\n 'command_line':abbreviate(' '.join(sys.argv))}\n if self.dut:\n dutdoc = self.mdb.duts.find_one({'name':self.dut})\n self.dut_id = terms['dut'] = dutdoc['_id']\n terms['dut_name'] = dutdoc['name']\n if 'development_mode' in dutdoc:\n terms['development_mode'] = dutdoc['development_mode']\n self.result_id = self.mdb.results.save(terms)\n if self.job_id is not None:\n self.mdb.jobs.update({'_id':objectid.ObjectId(self.job_id)}, {'$set':{'results_id':self.result_id}})\n if self.build is None and self.dut:\n self.build = get_build(self.dut, timeout=10)\n self.mdb.results.update({'_id':self.result_id}, \n {'$set':{'build':self.build}})\n if self.dut:\n self.mdb.duts.update({'_id':terms['dut']}, {'$set': {\n 'build':self.build,\n 'control_command_line': abbreviate(' '.join(sys.argv)),\n 'result_id' : self.result_id}})\n if self.stdout_filter:\n self.record_queue = Queue()\n self.stream_process = Process(\n target=service_queue, \n args=[self.record_queue, self.result_id, \n self.dut, self.dut_id])\n self.stream_process.start()\n self.stdout_filter.add_callback(self, \n lambda *x: self.record_queue.put(x))\n\n if self.description:\n print 'HEADLINE: starting', self.full_description()\n get_track().updates.save({'result_id':self.result_id,\n 'action':'new result record'})\n return self", "def active():\n if env.get('active_instance'):\n print \"Active Instance: \" + env.get('active_instance')\n else:\n print \"No active instance\"", "def on_L2(self):\r\n self.log()", "def log_create(sender, instance, created, **kwargs):\n if created:\n changes = model_instance_diff(None, instance)\n\n log_entry = LogEntry.objects.log_create(\n instance,\n action=LogEntry.Action.CREATE,\n changes=json.dumps(changes),\n )\n log_created.send(\n sender=LogEntry,\n old_instance=None,\n new_instance=instance,\n log_instance=log_entry,\n )", "def audit(self):\n self.ping()", "def checkLogs():\n run(\"cat /etc/chariot/logs\")#not sure if this is cat-able", "def doRestore(self):\n self.logger.log(\"Begin to restore instance status...\")\n \n try:\n self.readConfigInfo()\n self.getUserInfo()\n \n # dump status to file\n cmd = ClusterCommand.getQueryStatusCmd(self.user, self.dbNodeInfo.id, self.__curStatusFile)\n (status, output) = commands.getstatusoutput(cmd)\n if (status != 0):\n self.logger.logExit(\"Query local instance status failed!Error: %s\" % output)\n \n bakDbStatus = DbClusterStatus()\n bakDbStatus.initFromFile(self.__bakStatusFile)\n bakNodeStatus = 
bakDbStatus.getDbNodeStatusById(self.dbNodeInfo.id)\n if (bakNodeStatus is None):\n self.logger.logExit(\"Get backup status of local node failed!\")\n \n curDbStatus = DbClusterStatus()\n curDbStatus.initFromFile(self.__curStatusFile)\n curNodeStatus = curDbStatus.getDbNodeStatusById(self.dbNodeInfo.id)\n if (curNodeStatus is None):\n self.logger.logExit(\"Get current status of local node failed!\")\n if (not curNodeStatus.isNodeHealthy()):\n self.logger.logExit(\"Current status of node is not healthy!\")\n \n # Compare the status and restore it\n bakInstances = bakNodeStatus.datanodes + bakNodeStatus.gtms\n for bakInst in bakInstances:\n curInst = curNodeStatus.getInstanceByDir(bakInst.datadir)\n if (curInst is None):\n self.logger.logExit(\"Get current status of instance failed!DataDir:%s\" % bakInst.datadir)\n \n if (bakInst.status == curInst.status):\n continue\n \n if (bakInst.status == DbClusterStatus.INSTANCE_STATUS_PRIMARY):\n self.__switchToPrimary(bakInst.datadir)\n elif (bakInst.status == DbClusterStatus.INSTANCE_STATUS_STANDBY):\n self.__switchToStandby(bakInst.datadir)\n \n except Exception, e:\n self.logger.logExit(str(e))\n \n self.logger.log(\"Restore instance status successfully.\")\n self.logger.closeLog()", "def test_live_migration_src_check_instance_not_running(self):\n\n instance_id = self._create_instance(power_state=power_state.NOSTATE)\n i_ref = db.instance_get(self.context, instance_id)\n\n try:\n self.scheduler.driver._live_migration_src_check(self.context,\n i_ref)\n except exception.Invalid, e:\n c = (e.message.find('is not running') > 0)\n\n self.assertTrue(c)\n db.instance_destroy(self.context, instance_id)", "def __init__(self, batch_size, log_steps):\n self.batch_size = batch_size\n super(TimeHistory, self).__init__()\n self.log_steps = log_steps\n\n # Logs start of step 0 then end of each step based on log_steps interval.\n self.timestamp_log = []", "def check_status():\n js = _get_jetstream_conn()\n i = js.compute.instances.get(session.attributes.get('instance_id'))\n if not i:\n return question(\"There was a problem. 
Please retry your command.\")\n\n status = i.state\n if session.attributes['status'] != status:\n msg = \"New instance status is {0}.\".format(status)\n if not session.attributes['public_ip'] and status == 'running':\n # Attach a floating IP to the instance\n fip = None\n fips = js.network.floating_ips()\n for ip in fips:\n if not ip.in_use():\n fip = ip\n if fip:\n i.add_floating_ip(fip.public_ip)\n session.attributes['public_ip'] = fip.public_ip\n else:\n msg = \"Instance status is {0}\".format(status)\n\n session.attributes['status'] = status\n\n if session.attributes['status'] != 'running':\n q = \"Would you like to check the status again?\"\n return question(msg + q).reprompt(q)\n else:\n card_content = 'Access your instance at http://{0}'.format(\n session.attributes.get('public_ip'))\n return statement(msg).simple_card(\n title=\"Instance {0} was launched.\".format(i.name),\n content=msg + card_content)", "def execute(self):\n instances = self._get_active_instances()\n if not instances:\n print(\"No running instances.\")\n else:\n output = \"\\nInstanceId\\t\\tName\\n\\n\"\n for instance in instances:\n name = self._get_instance_name(instance)\n instance_id = instance['InstanceId']\n output += f\"{instance_id}\\t{name}\\n\"\n\n print(output)", "def _sync_instance_power_state(self, context, db_instance, vm_power_state,\n use_slave=False):\n\n # We re-query the DB to get the latest instance info to minimize\n # (not eliminate) race condition.\n db_instance.refresh(use_slave=use_slave)\n db_power_state = db_instance.power_state\n vm_state = db_instance.vm_state\n\n if self.host != db_instance.host:\n # on the sending end of cloud-cloud _sync_power_state\n # may have yielded to the greenthread performing a live\n # migration; this in turn has changed the resident-host\n # for the VM; However, the instance is still active, it\n # is just in the process of migrating to another host.\n # This implies that the cloud source must relinquish\n # control to the cloud destination.\n LOG.info(_LI(\"During the sync_power process the \"\n \"instance has moved from \"\n \"host %(src)s to host %(dst)s\"),\n {'src': db_instance.host,\n 'dst': self.host},\n instance=db_instance)\n return\n elif db_instance.task_state is not None:\n # on the receiving end of cloud-cloud, it could happen\n # that the DB instance already report the new resident\n # but the actual VM has not showed up on the hypervisor\n # yet. In this case, let's allow the loop to continue\n # and run the state sync in a later round\n LOG.info(_LI(\"During sync_power_state the instance has a \"\n \"pending task (%(task)s). Skip.\"),\n {'task': db_instance.task_state},\n instance=db_instance)\n return\n\n orig_db_power_state = db_power_state\n if vm_power_state != db_power_state:\n LOG.info(_LI('During _sync_instance_power_state the DB '\n 'power_state (%(db_power_state)s) does not match '\n 'the vm_power_state from the hypervisor '\n '(%(vm_power_state)s). Updating power_state in the '\n 'DB to match the hypervisor.'),\n {'db_power_state': db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n # power_state is always updated from hypervisor to db\n db_instance.power_state = vm_power_state\n db_instance.save()\n db_power_state = vm_power_state\n\n # Note(maoy): Now resolve the discrepancy between vm_state and\n # vm_power_state. 
We go through all possible vm_states.\n if vm_state in (vm_states.BUILDING,\n vm_states.RESCUED,\n vm_states.RESIZED,\n vm_states.SUSPENDED,\n vm_states.ERROR):\n # TODO(maoy): we ignore these vm_state for now.\n pass\n elif vm_state == vm_states.ACTIVE:\n # The only rational power state should be RUNNING\n if vm_power_state in (power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Instance shutdown by itself. Calling the \"\n \"stop API. Current vm_state: %(vm_state)s, \"\n \"current task_state: %(task_state)s, \"\n \"original DB power_state: %(db_power_state)s, \"\n \"current VM power_state: %(vm_power_state)s\"),\n {'vm_state': vm_state,\n 'task_state': db_instance.task_state,\n 'db_power_state': orig_db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n try:\n # Note(maoy): here we call the API instead of\n # brutally updating the vm_state in the database\n # to allow all the hooks and checks to be performed.\n if db_instance.shutdown_terminate:\n self.compute_api.delete(context, db_instance)\n else:\n self.compute_api.stop(context, db_instance)\n except Exception:\n # Note(maoy): there is no need to propagate the error\n # because the same power_state will be retrieved next\n # time and retried.\n # For example, there might be another task scheduled.\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_power_state == power_state.SUSPENDED:\n LOG.warning(_LW(\"Instance is suspended unexpectedly. Calling \"\n \"the stop API.\"), instance=db_instance)\n try:\n self.compute_api.stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_power_state == power_state.PAUSED:\n # Note(maoy): a VM may get into the paused state not only\n # because the user request via API calls, but also\n # due to (temporary) external instrumentations.\n # Before the virt layer can reliably report the reason,\n # we simply ignore the state discrepancy. In many cases,\n # the VM state will go back to running after the external\n # instrumentation is done. See bug 1097806 for details.\n LOG.warning(_LW(\"Instance is paused unexpectedly. Ignore.\"),\n instance=db_instance)\n elif vm_power_state == power_state.NOSTATE:\n # Occasionally, depending on the status of the hypervisor,\n # which could be restarting for example, an instance may\n # not be found. Therefore just log the condition.\n LOG.warning(_LW(\"Instance is unexpectedly not found. Ignore.\"),\n instance=db_instance)\n elif vm_state == vm_states.STOPPED:\n if vm_power_state not in (power_state.NOSTATE,\n power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Instance is not stopped. Calling \"\n \"the stop API. 
Current vm_state: %(vm_state)s,\"\n \" current task_state: %(task_state)s, \"\n \"original DB power_state: %(db_power_state)s, \"\n \"current VM power_state: %(vm_power_state)s\"),\n {'vm_state': vm_state,\n 'task_state': db_instance.task_state,\n 'db_power_state': orig_db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n try:\n # NOTE(russellb) Force the stop, because normally the\n # cloud API would not allow an attempt to stop a stopped\n # instance.\n self.compute_api.force_stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_state == vm_states.PAUSED:\n if vm_power_state in (power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Paused instance shutdown by itself. Calling \"\n \"the stop API.\"), instance=db_instance)\n try:\n self.compute_api.force_stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_state in (vm_states.SOFT_DELETED,\n vm_states.DELETED):\n if vm_power_state not in (power_state.NOSTATE,\n power_state.SHUTDOWN):\n # Note(maoy): this should be taken care of periodically in\n # _cleanup_running_deleted_instances().\n LOG.warning(_LW(\"Instance is not (soft-)deleted.\"),\n instance=db_instance)", "def objectLogger(params):\n mode = params['mode']\n maxlen = params['maxlen']\n file_format = params['format']\n home_dir = params['storage_dir']\n server = Listener((params['ip'], int(params['port'])))\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n while True:\n conn = server.accept()\n while True:\n try:\n data = conn.recv()\n except EOFError:\n break\n if data:\n storage.append(data)\n else:\n storage.sync(id_generator())\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n conn.close()", "def save(self):\n if self._dirty:\n # key and timestamp\n data = {\n InstanceStates.INSTANCE_TABLE_NAME: self._service,\n InstanceStates.INSTANCE_TABLE_ACCOUNT_REGION: self._current_account_region,\n InstanceStates.INSTANCE_TABLE_TIMESTAMP: Decimal(time.time())\n }\n\n # store instance states as one column per instance\n for i in self._state_info:\n data[i] = self._state_info[i]\n\n # instances to purge\n if len(self._instances_to_purge) > 0:\n data[InstanceStates.INSTANCE_TABLE_PURGE] = self._instances_to_purge\n\n self.state_table.put_item_with_retries(Item=data)\n self._dirty = False", "def daemonize(self):\n raise NotImplementedError()", "def monitor(instance=\"default\"):\n global logger_ic\n while True:\n try:\n with open(\"{}/{}/.{}-bmc.pid\".format(\n config.infrasim_home, instance, instance), \"r\") as f:\n pid = f.readline().strip()\n if not os.path.exists(\"/proc/{}\".format(pid)):\n logger_ic.warning(\"Node {} vBMC {} is not running, \"\n \"ipmi-console is ready to quit\".\n format(instance, pid))\n break\n time.sleep(3)\n except IOError:\n logger_ic.warning(\"Node {} workspace is possibly destroyed, \"\n \"ipmi-console is ready to quit\".format(instance))\n break\n stop(instance)", "def __init__(self, logfile):\n\n self.logfile = logfile\n if self. 
logfile:\n self.file = open(logfile, \"w\")\n self.starttime = time.time()\n self.file.write(\"%.2f %s Starting log\\n\" % (time.time() - self.starttime, time.asctime()))", "def load(lb_id, backend='memory'):\n return b_api.fetch(backend).logbook_get(lb_id)", "def start_instance(self):\n instance_id = self._choose_among_stopped_instances()\n\n # Cancel\n if not instance_id:\n print 'Operation cancelled'\n return\n\n print '# Starting the instance \"%s\"' % instance_id\n if self.compute.start_instance(instance_id):\n print 'The instance has been started'\n else:\n print 'The instance could not be started'", "def _inst_run(self):\r\n self._inst_get_img_info_from_db()\r\n print(\"run method: \", time.ctime())\r\n th = threading.Timer(\r\n 10,\r\n self._inst_run\r\n )\r\n th.start()", "def __init__(self, instance):\n self.instance = instance\n self.runs = []", "def __on_backup_created(self, logger, *args):", "def create_volume(self, instance_id):\n user, instance = _get_user_and_instance(self.girder_client, instance_id)\n tale = self.girder_client.get('/tale/{taleId}'.format(**instance))\n\n self.job_manager.updateProgress(\n message='Creating volume', total=CREATE_VOLUME_STEP_TOTAL,\n current=1, forceFlush=True)\n\n vol_name = \"%s_%s_%s\" % (tale['_id'], user['login'], new_user(6))\n fs_sidecar = FSContainer.start_container(vol_name)\n payload = {\n \"mounts\": [\n {\n \"type\": \"data\",\n \"protocol\": \"girderfs\",\n \"location\": \"data\",\n },\n {\n \"type\": \"home\",\n \"protocol\": \"bind\",\n \"location\": \"home\",\n },\n {\n \"type\": \"workspace\",\n \"protocol\": \"bind\",\n \"location\": \"workspace\",\n },\n {\n \"type\": \"versions\",\n \"protocol\": \"girderfs\",\n \"location\": \"versions\",\n },\n {\n \"type\": \"runs\",\n \"protocol\": \"girderfs\",\n \"location\": \"runs\",\n },\n ],\n \"taleId\": tale[\"_id\"],\n \"userId\": user[\"_id\"],\n \"girderApiUrl\": GIRDER_API_URL,\n \"girderApiKey\": _get_api_key(self.girder_client),\n \"root\": vol_name,\n }\n FSContainer.mount(fs_sidecar, payload)\n self.job_manager.updateProgress(\n message='Volume created', total=CREATE_VOLUME_STEP_TOTAL,\n current=CREATE_VOLUME_STEP_TOTAL, forceFlush=True)\n print(\"WT Filesystem created successfully.\")\n\n cli = docker.from_env()\n return dict(\n nodeId=cli.info()['Swarm']['NodeID'],\n fscontainerId=fs_sidecar.id,\n volumeName=vol_name,\n instanceId=instance_id,\n taleId=tale[\"_id\"],\n )", "def test_get_instance_state(self):\r\n self.peer_grading.get_instance_state()", "def on_L1(self):\r\n self.log()", "def __init__(self):\n self._workload = None\n self._engine = Engine()", "def test_delete_instance_changes_power_state(self):\n instance = self._create_fake_instance_obj()\n self.compute._delete_instance(self.context, instance, [])\n self.assertEqual(power_state.NOSTATE, instance.power_state)", "def test_errors_on_log(self):\n mb = self.maria_backup\n self.assertFalse(mb.errors_on_log()) # at the moment, xtrabackup execution does not generate disk logs", "def log_kernel_launch(self, cmd: List[str]) -> None:\n pass", "def access_spot_instance_state() -> Response:\n retry_session = requests_retry_session()\n response = retry_session.get(INSTANCE_ACTION_URL)\n return response", "def check(key):\n instance = key.get()\n if not instance:\n logging.warning('Instance does not exist: %s', key)\n return\n\n if not instance.active_metadata_update:\n logging.warning('Instance active metadata operation unspecified: %s', key)\n return\n\n if not instance.active_metadata_update.url:\n 
logging.warning(\n 'Instance active metadata operation URL unspecified: %s', key)\n return\n\n now = utils.utcnow()\n result = net.json_request(\n instance.active_metadata_update.url, scopes=gce.AUTH_SCOPES)\n if result['status'] != 'DONE':\n return\n\n if result.get('error'):\n logging.warning(\n 'Instance metadata operation failed: %s\\n%s',\n key,\n json.dumps(result, indent=2),\n )\n metrics.instance_set_metadata_time.add(\n (now - instance.active_metadata_update.operation_ts).total_seconds(),\n fields={\n 'success': False,\n 'zone': instance.instance_group_manager.id(),\n },\n )\n metrics.send_machine_event('METADATA_UPDATE_FAILED', instance.hostname)\n reschedule_active_metadata_update(key, instance.active_metadata_update.url)\n metrics.send_machine_event('METADATA_UPDATE_READY', instance.hostname)\n else:\n metrics.instance_set_metadata_time.add(\n (now - instance.active_metadata_update.operation_ts).total_seconds(),\n fields={\n 'success': True,\n 'zone': instance.instance_group_manager.id(),\n },\n )\n metrics.send_machine_event('METADATA_UPDATE_SUCCEEDED', instance.hostname)\n clear_active_metadata_update(key, instance.active_metadata_update.url)", "def main():\n\n parser = init_parser()\n args = parser.parse_args()\n\n # Set up logging.\n level = logging.INFO\n if args.debug:\n level = logging.DEBUG\n logging.basicConfig(format='%(asctime)s %(levelname)s %(filename)s:' \\\n '%(lineno)s %(message)s ', level=level)\n logging.info(\"Logging started\")\n\n message = \"Backing up \"\n if args.source_code:\n message += \"source and \"\n message += \"data for: {0}\".format(args.app_id)\n logging.info(message)\n\n zk_connection_locations = appscale_info.get_zk_locations_string()\n zookeeper = zk.ZKTransaction(host=zk_connection_locations)\n db_info = appscale_info.get_db_info()\n table = db_info[':table']\n\n skip_list = args.skip\n if not skip_list:\n skip_list = []\n logging.info(\"Will skip the following kinds: {0}\".format(sorted(skip_list)))\n ds_backup = DatastoreBackup(args.app_id, zookeeper, table,\n source_code=args.source_code, skip_list=sorted(skip_list))\n try:\n ds_backup.run()\n finally:\n zookeeper.close()", "def __enable_log_streaming():\n # Enable BigQuery service for the project:\n run_command('gcloud services enable bigquery --project {}'.format(PROJECT_ID))\n\n # Create the BigQuery dataset to store the logs:\n # For details refer to https://cloud.google.com/bigquery/docs/datasets#bigquery-create-dataset-cli\n run_command('bq mk --data_location {} --description \\\"Cloud logging export.\\\" {}'\n .format(LOGS_LOCATION, LOGS_SINK_DATASET_ID), 'already exists')\n\n # Set up a log sink to the above-created BigQuery dataset:\n output_message = run_command('gcloud logging sinks create {} {} --project {} \\\n --log-filter=\\'resource.type=\\\"gcs_bucket\\\" OR resource.type=\\\"project\\\"\\''\n .format(LOGS_SINK_NAME, LOGS_SINK_DESTINATION, PROJECT_ID))\n\n # The service account that will be writing to BQ dataset is listed by the previous command.\n # After extracting the service account from the message, you need to give it BQ Writer role to that service account.\n __set_dataset_access( __find_service_account_in_message(output_message) )", "def pre_run_hook(self, instance, private_data_dir):\n instance.log_lifecycle(\"pre_run\")\n\n # Before task is started, ensure that job_event partitions exist\n create_partition(instance.event_class._meta.db_table, start=instance.created)", "def getLogs():", "def getLogs():", "def test_slf_basic():\n oldlogfile = get_logfile()\n with 
tempfile.TemporaryDirectory() as tmp:\n logfile = os.path.join(tmp, 'log.txt')\n assert ~os.path.exists(logfile)\n start_logfile(logfile)\n assert os.path.exists(logfile)\n set_logfile(oldlogfile)", "def test_instance_running(self) -> None:\n if self.prod_env:\n ec2_name = 'saints-xctf-server-prod-asg'\n else:\n ec2_name = 'saints-xctf-server-dev-asg'\n\n instances = self.get_ec2(ec2_name)\n self.assertTrue(len(instances) > 0)", "def test_metering_database(self):\n self.with_deletion = self.input.param(\"delete\", False)\n self.db_name = \"%s-testmetering\" % self.db_name\n # validate initial throughput is 5000/3 = 1666\n for bucket in self.cluster.buckets:\n print(bucket.servers)\n self.assertEqual(self.bucket_util.get_throttle_limit(bucket),\n self.bucket_throttling_limit)\n\n # validate create, update, delete stat\n for op_type in [\"create\", \"update\"]:\n if op_type == \"create\":\n self.load_data(create_start=0, create_end=self.num_items, create_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if op_type == \"update\":\n self.load_data(update_start=0, update_end=self.num_items, update_perc=100, mutated=1)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if self.with_deletion:\n self.log.info(\"performing delete operation\")\n self.load_data(delete_start=0, delete_end=self.num_items, delete_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)", "def start():\n logging.info(\"Execution Started\")", "def test_restart_statestore(self):\n # Verify two catalogd instances are created with one as active.\n catalogds = self.cluster.catalogds()\n assert(len(catalogds) == 2)\n catalogd_service_1 = catalogds[0].service\n catalogd_service_2 = catalogds[1].service\n assert(catalogd_service_1.get_metric_value(\"catalog-server.active-status\"))\n assert(not catalogd_service_2.get_metric_value(\"catalog-server.active-status\"))\n\n # Verify ports of the active catalogd of statestore and impalad are matching with\n # the catalog service port of the current active catalogd.\n self.__verify_statestore_active_catalogd_port(catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(0, catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(1, catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(2, catalogd_service_1)\n\n # Restart statestore. 
Verify one catalogd is assigned as active, the other is\n # assigned as standby.\n self.cluster.statestored.restart()\n wait_time_s = build_flavor_timeout(90, slow_build_timeout=180)\n self.cluster.statestored.service.wait_for_metric_value('statestore.live-backends',\n expected_value=5, timeout=wait_time_s)\n sleep_time_s = build_flavor_timeout(2, slow_build_timeout=5)\n sleep(sleep_time_s)\n assert(catalogd_service_1.get_metric_value(\"catalog-server.active-status\"))\n assert(not catalogd_service_2.get_metric_value(\"catalog-server.active-status\"))\n\n # Verify ports of the active catalogd of statestore and impalad are matching with\n # the catalog service port of the current active catalogd.\n self.__verify_statestore_active_catalogd_port(catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(0, catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(1, catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(2, catalogd_service_1)\n # Verify simple queries are ran successfully.\n self.__run_simple_queries()\n\n unexpected_msg = re.compile(\"Ignore the update of active catalogd since more recent \"\n \"update has been processed ([0-9]+ vs [0-9]+)\")\n self.assert_catalogd_log_contains(\"INFO\", unexpected_msg, expected_count=0)\n self.assert_impalad_log_contains(\"INFO\", unexpected_msg, expected_count=0)", "def clusterMonitor():\n node = os.environ['DIM_DNS_NODE']\n xml = XMLTaskList.TransformXmlToObjects()\n xml.load('../xml/TaskInventory.xml') # loads the Task Inventory\n xml.load('../xml/HLTD01.xml') # loads the Node List\n xml.load('../xml/HLTD02.xml') # loads the Node List\n xml.load('../xml/HLTD03.xml') # loads the Node List\n xml.load('../xml/HLTD04.xml') # loads the Node List\n xml.load('../xml/HLTD06.xml') # loads the Node List\n xml.load('../xml/HLTD07.xml') # loads the Node List\n xml.load('../xml/HLTD08.xml') # loads the Node List\n xml.load('../xml/HLTD09.xml') # loads the Node List\n xml.load('../xml/HLTD10.xml') # loads the Node List\n xml.load('../xml/HLTD11.xml') # loads the Node List\n xml.load('../xml/HLTE04.xml') # loads the Node List\n xml.load('../xml/HLTE06.xml') # loads the Node List\n xml.load('../xml/'+node.upper()+'.xml') # loads the Node List\n collector = ClusterCollector(xml)\n collector.startx()\n collector.run()", "def snapshot(self):\n return self.journal.create_checkpoint()", "def log(self, extra=None):\n\t\tself.stop()\n\t\treturn self._log(self.time(), extra)", "def export_getRunningInstancesHistory( self, timespan, bucketSize ):\n return gVirtualMachineDB.getRunningInstancesHistory( timespan, bucketSize )", "def time_step(self):\n\n self.reinitialize_backup_containers()\n\n super().time_step()\n\n self.make_a_backup_for_t()", "def _start(self, instance):\n try:\n # Attempt to start the VE.\n # NOTE: The VE will throw a warning that the hostname is invalid\n # if it isn't valid. This is logged in LOG.error and is not\n # an indication of failure.\n _, err = utils.execute('sudo', 'vzctl', 'start', instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Failed to start %d' % instance['id'])\n\n # Set instance state as RUNNING\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.RUNNING)\n return True" ]
[ "0.58150995", "0.5542857", "0.54209995", "0.5350285", "0.53098565", "0.52885276", "0.5250991", "0.5207924", "0.5130098", "0.5095916", "0.5095136", "0.5088931", "0.50700444", "0.50613415", "0.5044827", "0.5042709", "0.50386816", "0.50331056", "0.503233", "0.5017697", "0.5009156", "0.5008124", "0.49987674", "0.49835548", "0.4972169", "0.4968189", "0.49433815", "0.49298778", "0.49279296", "0.4927329", "0.49234065", "0.49173826", "0.49165097", "0.48906064", "0.4868887", "0.4859175", "0.48587155", "0.48580015", "0.48550534", "0.4853448", "0.4847571", "0.48475122", "0.48474595", "0.48455742", "0.48415396", "0.4837697", "0.48373583", "0.48358116", "0.48306936", "0.48258078", "0.48173413", "0.48102853", "0.4793062", "0.4784395", "0.47752312", "0.47750914", "0.47736442", "0.47712663", "0.47706816", "0.47630876", "0.47578207", "0.4736332", "0.4731864", "0.47257143", "0.47200632", "0.47191858", "0.4718802", "0.4716496", "0.47164142", "0.47157782", "0.47133964", "0.47130585", "0.4712683", "0.47124812", "0.4704543", "0.46964896", "0.46953118", "0.46931908", "0.4677675", "0.4675752", "0.46750304", "0.46690586", "0.4667021", "0.46653837", "0.46608603", "0.4658459", "0.46542045", "0.46534953", "0.46510357", "0.46510357", "0.46494573", "0.46491876", "0.46487305", "0.464814", "0.46449748", "0.46428365", "0.46426207", "0.46417624", "0.46349603", "0.46339038", "0.46336934" ]
0.0
-1
This operation is applicable only to general-purpose local-disk and dedicated local-disk instances. You can call this operation up to 30 times per minute. To call this operation at a higher frequency, use a Logstore. For more information, see [Manage a Logstore](~~48990~~).
def modify_audit_policy_with_options( self, request: dds_20151201_models.ModifyAuditPolicyRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.ModifyAuditPolicyResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.audit_log_switch_source): query['AuditLogSwitchSource'] = request.audit_log_switch_source if not UtilClient.is_unset(request.audit_status): query['AuditStatus'] = request.audit_status if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token if not UtilClient.is_unset(request.service_type): query['ServiceType'] = request.service_type if not UtilClient.is_unset(request.storage_period): query['StoragePeriod'] = request.storage_period req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='ModifyAuditPolicy', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.ModifyAuditPolicyResponse(), self.call_api(params, req, runtime) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def monitorStore():\n # commented to use psutil system info system_info = systeminfo.get_all_info()\n\n system_info = nodeinfo.node_all()\n system_info ['monitored_timestamp'] = config.get_current_system_timestamp()\n\n # Attach sliver info to system info\n system_info.update(sliverinfo.collectAllDataAPI())\n\n s = shelve.open('log_shelf.db', writeback = True)\n\n while(1):\n try:\n try:\n if s.has_key('current_seq_number'):\n #Update current sequence number\n s['current_seq_number']+= 1\n current_seq = s['current_seq_number']\n else:\n current_seq = 1\n s['current_seq_number']= current_seq\n\n print(\"writing to file: \" + str(current_seq))\n\n # print(\"writing to file\" + str(system_info))\n s[str(current_seq)]= system_info\n\n\n finally:\n s.close()\n break\n\n except OSError:\n # In some research devices, the underlying dbm has a bug which needs to be handled explicitly\n print(\"Exception caught while handling shelve file!! OS Error: file not found. Trying again in 1 second\")\n time.sleep(1)\n continue\n\n delete_seen_entries()", "def log(msg):\n\n print('datastore: %s' % msg)", "def disable_disk_logging():\r\n app.set_option(_DISK_LOG_LEVEL_OPTION, LogOptions._LOG_LEVEL_NONE_KEY, force=True)", "def store_stream(data: DataStream):\n if data:\n try:\n c1 = datetime.now()\n CC.save_datastream(data,\"datastream\")\n e1 = datetime.now()\n CC.save_datastream_to_influxdb(data)\n i1 = datetime.now()\n print(\"Cassandra Time: \", e1-c1, \" Influx Time: \",i1-e1, \" Batch size: \",len(data.data))\n except:\n cc_log()", "def test_Drive_log_count(opencl_env: cldrive_env.OpenCLEnvironment):\n logs = opencl_kernel_driver.Drive(\n \"\"\"\nkernel void A(global int* a, global int* b) {\n a[get_global_id(0)] += b[get_global_id(0)];\n}\n\"\"\",\n 16,\n 16,\n opencl_env,\n 5,\n )\n assert len(logs) == 5", "def disk():\n run(env.disk_usage_command % env)", "def record_used(kind, hash):\n if os.path.exists(LOG_FILEPATH):\n log = open(os.path.join(ROOT, 'used'), 'a')\n else:\n log = open(os.path.join(ROOT, 'used'), 'w+')\n\n log.writelines([\"%s...%s\\n\" % (kind, hash)])", "def syslog_local_file(handle, admin_state, name, severity=\"emergencies\",\n size=\"40000\"):\n\n from ucsmsdk.mometa.comm.CommSyslogFile import CommSyslogFile\n\n mo = CommSyslogFile(parent_mo_or_dn=\"sys/svc-ext/syslog\", size=size,\n admin_state=admin_state,\n name=name,\n severity=severity)\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n return mo", "def check_oplog(con, warning, critical, perf_data):\n warning = warning or 24 \n critical = critical or 4\n try:\n db = con.local\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) \n if (db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) != None) :\n oplog = \"oplog.rs\";\n else :\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"})\n if (db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"}) != None) :\n oplog = \"oplog.$main\";\n else :\n message = \"neither master/slave nor replica set replication detected\";\n return check_levels(None,warning,critical,message)\n\n try:\n set_read_preference(con.admin)\n data=con.local.command(pymongo.son_manipulator.SON([('collstats',oplog)]))\n except:\n data = con.admin.command(son.SON([('collstats',oplog)]))\n\n ol_size=data['size']\n ol_storage_size=data['storageSize']\n ol_used_storage=int(float(ol_size)/ol_storage_size*100+1)\n ol=con.local[oplog]\n firstc = ol.find().sort(\"$natural\",pymongo.ASCENDING).limit(1)[0]['ts']\n lastc = 
ol.find().sort(\"$natural\",pymongo.DESCENDING).limit(1)[0]['ts']\n time_in_oplog= (lastc.as_datetime()-firstc.as_datetime())\n message=\"Oplog saves \"+ str(time_in_oplog) + \" %d%% used\" %ol_used_storage \n try: #work starting from python2.7\n hours_in_oplog= time_in_oplog.total_seconds()/60/60\n except:\n hours_in_oplog= float(time_in_oplog.seconds + time_in_oplog.days * 24 * 3600)/60/60\n approx_level=hours_in_oplog*100/ol_used_storage\n message+=performance_data(perf_data,[(\"%.2f\" % hours_in_oplog,'oplog_time',warning,critical),(\"%.2f \" % approx_level, 'oplog_time_100_percent_used')])\n return check_levels(-approx_level,-warning,-critical,message)\n\n except Exception, e:\n return exit_with_general_critical(e)", "def disk_log_level():\r\n if LogOptions._DISK_LOG_LEVEL is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_LEVEL", "def get_full_log(self):\n return self._get_log('full')", "def _record_storage_event(metric, value=0):\n command_name = properties.VALUES.metrics.command_name.Get()\n metrics.CustomKeyValue(command_name, 'Storage-' + metric, value)", "def get_logdb_quota():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<show><system><logdb-quota></logdb-quota></system></show>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def test_memory_pressure(self):\n self.execute_query(self.query)\n # This triggers a full GC as of openjdk 1.8.\n call([\"jmap\", \"-histo:live\", str(self.cluster.catalogd.get_pid())])\n # Sleep for logbufsecs=5 seconds to wait for the log to be flushed. Wait 5 more\n # seconds to reduce flakiness.\n time.sleep(10)\n assert self.metadata_cache_string not in self._get_catalog_object()", "def getLogs():", "def getLogs():", "def InsertLog():", "def GetLogs(self):\n raise NotImplementedError()", "def logmem(cls, tag):\n\n cls.logger.info(\"----%s----\", str(tag))\n mem = psutil.virtual_memory()\n cls.logger.info(\"total:%s M\", mem.total / 1024 / 1024)\n cls.logger.info(\"available:%s M\", mem.available / 1024 / 1024)\n cls.logger.info(\"used:%s M\", mem.used / 1024 / 1024)\n cls.logger.info(\"free:%s M\", mem.free / 1024 / 1024)\n cls.logger.info(\"active:%s M\", mem.active / 1024 / 1024)\n cls.logger.info(\"inactive:%s M\", mem.inactive / 1024 / 1024)\n cls.logger.info(\"buffers:%s M\", mem.buffers / 1024 / 1024)\n cls.logger.info(\"cached:%s M\", mem.cached / 1024 / 1024)\n cls.logger.info(\"shared:%s M\", mem.shared / 1024 / 1024)", "def upload_crashes(self, name, directory):\n logging.info('Not uploading crashes because no Filestore.')", "def logs(lines=50):\n run(f'journalctl --lines {lines} --unit addok --follow')", "def log(self, message):\n try:\n stat = os.stat(self.logpath)\n if stat.st_size >= 1000000:\n os.rename(self.logpath, self.logpath + '.1')\n except:\n pass\n logfile = open(self.logpath, 'a+')\n logfile.write(message + \"\\n\")\n logfile.close()", "def logs(name, namespace, region, start_time, end_time, offset, failed, tail):\n\n start, end = _align_time(start_time, end_time, offset, tail)\n client = ScfLogClient(name, namespace, region, failed)\n client.fetch_log(start, end, tail)", "def objectLogger(params):\n mode = params['mode']\n maxlen = params['maxlen']\n file_format = params['format']\n home_dir = params['storage_dir']\n server = Listener((params['ip'], int(params['port'])))\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n while True:\n conn = server.accept()\n while True:\n try:\n data 
= conn.recv()\n except EOFError:\n break\n if data:\n storage.append(data)\n else:\n storage.sync(id_generator())\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n conn.close()", "def _load_disk(self):", "def _load_disk(self):", "def redire_spark_logs(bigdl_type=\"float\", log_path=os.getcwd() + \"/bigdl.log\"):\n callBigDlFunc(bigdl_type, \"redirectSparkLogs\", log_path)", "def get_device_log(self, client):\r\n file_path = client.getDeviceLog()\r\n logging.info(str(time.asctime(time.localtime())) + \" Device Logs collected !! \" + file_path)\r\n #self.logger.info(\"Device logs collected at %s\" % file_path)\r\n return file_path", "def logs(self, container: Container) -> str:", "def log_store(self) -> str:\n return pulumi.get(self, \"log_store\")", "def on_sync(self):\r\n self.log()", "def collect_storage_metrics(sys):\n try:\n session = get_session()\n client = InfluxDBClient(host=INFLUXDB_HOSTNAME, port=INFLUXDB_PORT, database=INFLUXDB_DATABASE)\n\n sys_id = sys[\"id\"]\n sys_name = get_system_name(sys)\n\n json_body = list()\n\n # Get Drive statistics\n drive_stats_list = session.get((\"{}/{}/analysed-drive-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n drive_locations = get_drive_location(sys_id, session)\n if CMD.showDriveNames:\n for stats in drive_stats_list:\n location_send = drive_locations.get(stats[\"diskId\"])\n LOG.info((\"Tray{:02.0f}, Slot{:03.0f}\").format(location_send[0], location_send[1]))\n # Add Drive statistics to json body\n for stats in drive_stats_list:\n disk_location_info = drive_locations.get(stats[\"diskId\"])\n disk_item = dict(\n measurement = \"disks\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n sys_tray = (\"{:02.0f}\").format(disk_location_info[0]),\n sys_tray_slot = (\"{:03.0f}\").format(disk_location_info[1])\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in DRIVE_PARAMS\n )\n )\n if CMD.showDriveMetrics:\n LOG.info(\"Drive payload: %s\", disk_item)\n json_body.append(disk_item)\n\n # Get interface statistics\n interface_stats_list = session.get((\"{}/{}/analysed-interface-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showInterfaceNames:\n for stats in interface_stats_list:\n LOG.info(stats[\"interfaceId\"])\n # Add interface statistics to json body\n for stats in interface_stats_list:\n if_item = dict(\n measurement = \"interface\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n interface_id = stats[\"interfaceId\"],\n channel_type = stats[\"channelType\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in INTERFACE_PARAMS\n )\n )\n if CMD.showInterfaceMetrics:\n LOG.info(\"Interface payload: %s\", if_item)\n json_body.append(if_item)\n\n # Get System statistics\n system_stats_list = session.get((\"{}/{}/analysed-system-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n # Add System statistics to json body\n sys_item = dict(\n measurement = \"systems\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name\n ),\n fields = dict(\n (metric, system_stats_list.get(metric)) for metric in SYSTEM_PARAMS\n )\n )\n if CMD.showSystemMetrics:\n LOG.info(\"System payload: %s\", sys_item)\n json_body.append(sys_item)\n \n # Get Volume statistics\n volume_stats_list = session.get((\"{}/{}/analysed-volume-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showVolumeNames:\n for stats in volume_stats_list:\n LOG.info(stats[\"volumeName\"])\n # Add Volume statistics to json body\n for stats in 
volume_stats_list:\n vol_item = dict(\n measurement = \"volumes\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n vol_name = stats[\"volumeName\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in VOLUME_PARAMS\n )\n )\n if CMD.showVolumeMetrics:\n LOG.info(\"Volume payload: %s\", vol_item)\n json_body.append(vol_item)\n\n if not CMD.doNotPost:\n client.write_points(json_body, database=INFLUXDB_DATABASE, time_precision=\"s\")\n\n except RuntimeError:\n LOG.error((\"Error when attempting to post statistics for {}/{}\").format(sys[\"name\"], sys[\"id\"]))", "def reset_used():\n with open(LOG_FILEPATH, 'w+') as logfile:\n pass", "def docker_logs():\n local('docker logs {} -f'.format(project_name))", "def test_slf_notastring():\n oldlogfile = get_logfile()\n start_logfile(1.0)\n start_logfile(True)\n set_logfile(oldlogfile)", "def set_disk_log_level(log_level):\r\n LogOptions._DISK_LOG_SCHEME, LogOptions._DISK_LOG_LEVEL = (\r\n LogOptions._parse_loglevel(log_level, scheme='google'))", "def test_slf_basic():\n oldlogfile = get_logfile()\n with tempfile.TemporaryDirectory() as tmp:\n logfile = os.path.join(tmp, 'log.txt')\n assert ~os.path.exists(logfile)\n start_logfile(logfile)\n assert os.path.exists(logfile)\n set_logfile(oldlogfile)", "def _performance_log(func):\n\n def wrapper(*arg):\n \"\"\" wrapper \"\"\"\n\n start = datetime.datetime.now()\n\n # Code execution\n res = func(*arg)\n\n if _log_performance:\n usage = resource.getrusage(resource.RUSAGE_SELF)\n memory_process = (usage[2])/1000\n\n delta = datetime.datetime.now() - start\n delta_milliseconds = int(delta.total_seconds() * 1000)\n\n _logger.info(\"PERFORMANCE - {0} - milliseconds |{1:>8,}| - memory MB |{2:>8,}|\"\n .format(func.__name__,\n delta_milliseconds,\n memory_process))\n\n return res\n\n return wrapper", "def log(length, file):\n\n if user_init.check_pre_init() and user_utility.check_drive_init() == 'True':\n \n data = user_utility.read_log(length, file)\n\n for log in data:\n print(log)\n\n\n else:\n user_utility.print_error(\"Sink folder not Found! 
Initialise folder first or reset your configuration!\")", "def logprllwrite(self):\n sql = '''select to_char(time_waited, 'FM99999999999999990') retvalue \n from v$system_event se, v$event_name en where se.event(+) \n = en.name and en.name = 'log files parallel write' '''\n self.cur.execute(sql)\n res = self.cur.fetchall()\n for i in res:\n print(i[0])", "def init_log():\n log_op = LogOp()\n log_op.config_log(logging.DEBUG)", "def logManager(self):\n time.sleep(0.1)\n while True:\n try:\n time.sleep(0.2)\n data = self.logQ.get(block=False)\n except Queue.Empty:\n pass\n else:\n try:\n self.log_lock.acquire() \n self.log_file.logEntry(data)\n time.sleep(0.1)\n self.log_lock.release()\n except:\n print '*Unable to write to log file*'", "def startLogFiles():\n #global techLog\n global userl\n _generic_startLogFiles(True)", "def _load_disk(self):\r\n pass", "def log_free_disk_space():\n cmd = 'df -h'\n p = Popen(cmd, shell=True, stdout=PIPE)\n res = p.communicate()\n if res[0]:\n res = res[0]\n else:\n res = res[1]\n logger.warning('Disk usage statisticks:')\n logger.warning(res)", "def test02StoreRefresh(self):\n s = utils.FastStore(max_size=5)\n keys = []\n for i in range(0, 5):\n keys.append(s.Put(i, i))\n\n # This should not raise because keys[0] should be refreshed each time its\n # gotten\n for i in range(0, 1000):\n s.Get(keys[0])\n s.Put(i, i)", "def log_time(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n start = time.time()\n result = func(*args, **kwargs)\n end = time.time()\n f = open(\"log_dev.txt\",'a',encoding=\"utf8\")\n time_res = end - start\n f.write(\"\\n\"+func.__name__+ \" time = \" + str(time_res))\n return result\n\n return wrapper", "def checkLogs():\n run(\"cat /etc/chariot/logs\")#not sure if this is cat-able", "def store(**kwargs):\r\n stored = AppLog(**kwargs)\r\n DBSession.add(stored)", "def test_compute_persistence(perfectModelEnsemble_initialized_control):\n perfectModelEnsemble_initialized_control._compute_persistence(metric=\"acc\")", "def collect_logs(self, log_dir, label=None, min_t=100, max_t=200):\n pass", "def test_backup_with_update_on_disk_of_snapshot_markers(self):\n version = RestConnection(self.backupset.backup_host).get_nodes_version()\n if version[:5] == \"6.5.0\":\n self.log.info(\"\\n\\n******* Due to issue in MB-36904, \\\n \\nthis test will be skipped in 6.5.0 ********\\n\")\n return\n gen1 = BlobGenerator(\"ent-backup1\", \"ent-backup-\", self.value_size, end=100000)\n gen2 = BlobGenerator(\"ent-backup2\", \"ent-backup-\", self.value_size, end=100000)\n gen3 = BlobGenerator(\"ent-backup3\", \"ent-backup-\", self.value_size, end=100000)\n rest_conn = RestConnection(self.backupset.cluster_host)\n rest_conn.create_bucket(bucket=\"bucket0\", ramQuotaMB=1024)\n self.buckets = rest_conn.get_buckets()\n authentication = \"-u Administrator -p password\"\n\n self._load_all_buckets(self.master, gen1, \"create\", 0)\n self.log.info(\"Stop persistent\")\n cluster_nodes = rest_conn.get_nodes()\n clusters = copy.deepcopy(cluster_nodes)\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n for node in clusters:\n shell.execute_command(\"%scbepctl%s %s:11210 -b %s stop %s\" % \\\n (self.cli_command_location,\n self.cmd_ext,\n node.ip,\n \"bucket0\",\n authentication))\n shell.disconnect()\n self.log.info(\"Load 2nd batch docs\")\n self._load_all_buckets(self.master, gen2, \"create\", 0)\n self.log.info(\"Run full backup with cbbackupwrapper\")\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n backup_dir = 
self.tmp_path + \"backup\" + self.master.ip\n shell.execute_command(\"rm -rf %s\" % backup_dir)\n shell.execute_command(\"mkdir %s\" % backup_dir)\n shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m full %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n self.log.info(\"Load 3rd batch docs\")\n self._load_all_buckets(self.master, gen3, \"create\", 0)\n self.log.info(\"Run diff backup with cbbackupwrapper\")\n output, _ = shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m diff %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n\n if output and \"SUCCESSFULLY COMPLETED\" not in output[1]:\n self.fail(\"Failed to backup as the fix in MB-25727\")\n shell.disconnect()", "def test_errors_on_log(self):\n mb = self.maria_backup\n self.assertFalse(mb.errors_on_log()) # at the moment, xtrabackup execution does not generate disk logs", "def add_log(self, log):\n log = str(datetime.datetime.now()) + \": \"+log+\"\\n\"\n print(log)\n self.logs.append(log)\n if len(self.logs) > 10:\n self.append_to_logfile()", "def log_scalar(name, value, step, autolog):\n if not autolog:\n mlflow.log_metric(name, value)", "def recordLogsToList(log):\n print log\n# global LOGLIST\n LOGLIST.append(log)", "def on_L1(self):\r\n self.log()", "def on_L2(self):\r\n self.log()", "def log_all(self):\n self.save_raw()\n self.log()", "def logs_directory(self):", "def test_log_links(self):\n self.host = synthetic_host(\"myserver-with-nids\", [Nid.Nid(\"192.168.0.1\", \"tcp\", 0)])\n self.create_simple_filesystem(self.host)\n fake_log_message(\"192.168.0.1@tcp testfs-MDT0000\")\n response = self.api_client.get(\"/api/log/\")\n (event,) = self.deserialize(response)[\"objects\"]\n self.assertEqual(len(event[\"substitutions\"]), 2)\n self.host.state = \"removed\"\n self.host.save()\n self.mdt.not_deleted = False\n self.mdt.save()\n response = self.api_client.get(\"/api/log/\")\n (event,) = self.deserialize(response)[\"objects\"]\n self.assertEqual(len(event[\"substitutions\"]), 0)", "def log(string):\n\n print string\n\n# data and time\n dt = datetime.now().strftime(\"%b %d %H:%M:%S\")\n\n# check if log file exist / if not create it\n check_logf = os.path.isfile(logfile)\n if check_logf == False:\n os.system(\"touch %s\" % (logfile))\n firstlog = \"%s %s jadm: jadm log file was created!\" % (dt, os.uname()[1])\n os.system(\"echo '%s' > %s\" % (firstlog, logfile))\n\n# applay string to log file\n string = \"%s %s jadm: %s\" % (dt, os.uname()[1], string)\n os.system(\"echo '%s' >> %s\" % (string, logfile))", "def test_data_store_local_index(self, tcex: TcEx):\n tcex.api.tc.v2.datastore('local', self.data_type)", "def createLogicalVolume(self, vg, filesystem, name, mountpoint, size):\n lv = {}\n lv['command'] = 'create:logvol'\n lv['vg'] = vg\n lv['fs'] = filesystem\n lv['size'] = size - EXTENT_SIZE + 1\n lv['name'] = name\n lv['mountPoint'] = mountpoint\n lv['format'] = 'yes'\n\n return lv", "def disk_log_scheme():\r\n if LogOptions._DISK_LOG_SCHEME is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_SCHEME", "def test_cbbackupmgr_collect_logs(self):\n if \"5.5\" > self.cb_version[:3]:\n self.fail(\"This test is only for cb version 5.5 and later. 
\")\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster()\n self._collect_logs()", "def log_runtime(label, mean_time, std, instances):\n pass", "def do_disk_event(client, args):\n args.type = 'disk'\n do_event_show(client, args)", "def show_bigdl_info_logs(bigdl_type=\"float\"):\n callBigDlFunc(bigdl_type, \"showBigDlInfoLogs\")", "def logs(self, **options):\n cmd = self.get_scalingo_prefix_cmd()\n cmd += \" logs -f\"\n for name, val in options.items():\n if len(name) == 1:\n cmd += f\" -{name}\"\n else:\n cmd += f\" --{name}\"\n if val:\n cmd += f\" {val}\"\n if self.env_name == \"local\":\n raise ValueError(\"No logs on local environment\")\n else:\n asyncio.run(self.async_run(cmd))", "def store_data(batch_df: DataFrame, output_delta_lake_path):\n batch_df.select(col(\"MeterId\"),\n col(\"SupplierId\"),\n col(\"Measurement\"),\n col(\"ObservationTime\")) \\\n .repartition(\"SupplierId\") \\\n .write \\\n .partitionBy(\"SupplierId\") \\\n .format(\"delta\") \\\n .mode(\"append\") \\\n .save(output_delta_lake_path)", "def logentry(jobid, label, typ, content=None, path=None):\n ud = str(uuid.uuid4())\n db.logs.save({\"uuid\":ud, \"jobid\":jobid, \"label\":label, \"type\":typ, \"content\":content, \"date\":tstamp(), \"containerurl\":path})", "def dump_to_log(self):\n # self._send_request(\"/dumpToLog\")\n pass", "def load(lb_id, backend='memory'):\n return b_api.fetch(backend).logbook_get(lb_id)", "def store(status, message, **kwargs):\r\n LogRecord(status, message, **kwargs)", "def log_mounts_info(mounts):\n if isinstance(mounts, str):\n mounts = [mounts]\n\n g.log.info(\"Start logging mounts information:\")\n for mount_obj in mounts:\n g.log.info(\"Information of mount %s:%s\", mount_obj.client_system,\n mount_obj.mountpoint)\n # Mount Info\n g.log.info(\"Look For Mountpoint:\\n\")\n cmd = \"mount | grep %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Disk Space Usage\n g.log.info(\"Disk Space Usage Of Mountpoint:\\n\")\n cmd = \"df -h %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Long list the mountpoint\n g.log.info(\"List Mountpoint Entries:\\n\")\n cmd = \"ls -ld %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Stat mountpoint\n g.log.info(\"Mountpoint Status:\\n\")\n cmd = \"stat %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)", "def putlog(self,s):\n if not self.logqueue == None:\n# print s\n self.logqueue.put(\"Spectrum: (\"+time.ctime()+\"):\\n\"+s)", "def set_size_based_rotating_log(log_path=LOGGER_PATH, log_name=LOGGER_FILENAME):\n\n # Checks if the path exists otherwise tries to create it\n if not os.path.exists(log_path):\n try:\n os.makedirs(log_path)\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n print(exc_type, exc_value, exc_traceback)\n\n # Sets the format and level of logging records\n format = '%(asctime)s - %(levelname)s - [%(pathname)s:%(lineno)s - %(funcName)10s() ] - %(message)s'\n formatter = Formatter(format)\n level=logging.DEBUG\n\n # Does basic configuration for the logging system\n logging.basicConfig(format=format, level=level)\n\n # Instantiates a logger object\n logger = logging.getLogger(\"\")\n\n handler = logging.handlers.RotatingFileHandler(filename=log_path+'/'+log_name,\n maxBytes=LOGGER_MAX_SIZE,\n backupCount=LOGGER_MAX_FILES)\n 
handler.setFormatter(formatter)\n handler.rotator = rotator\n handler.namer = namer\n logger.addHandler(handler)\n return logger", "def test_logging_log_rotate_for_mysql(\n self,\n admin_node,\n k8s_actions,\n show_step,\n os_deployed):\n logger.info('Log rotate for mysql')\n log_path = '/var/log/ccp/mysql/'\n log_file = 'mysql.log'\n\n show_step(1)\n # get cron pod\n cron_pod = [pod for pod in k8s_actions.api.pods.list(\n namespace=ext.Namespace.BASE_NAMESPACE)\n if 'cron-' in pod.name][0]\n # clean files\n utils.rm_files(admin_node, cron_pod, log_path + log_file + '*')\n\n show_step(2)\n for day in range(0, 8):\n utils.create_file(admin_node, cron_pod, log_path + log_file, 110)\n utils.run_daily_cron(admin_node, cron_pod, 'logrotate')\n sleep(5)\n\n show_step(3)\n log_files = utils.list_files(\n admin_node, cron_pod, log_path, log_file + '*')\n assert len(log_files) == 7,\\\n \"Count of log files after rotation is wrong. \" \\\n \"Expected {} Actual {}\".format(log_files, 7)", "def log(cmdDict, kind, error=None):\n\n cmdDict['timestamp'] = str(int(time.time() * 1000))\n cmdDict['logType'] = kind\n cmdDict['_id'] = cmdDict['transactionNumber'] + cmdDict['user'] + cmdDict['command'] + cmdDict['timestamp'] + cmdDict['logType']\n\n if (error != None):\n cmdDict['errorMessage'] = error\n\n try:\n Database.insert(TRANSACT_COLLECT, cmdDict)\n\n except pymongo.errors.PyMongoError as err:\n print(f\"ERROR! Could not log command into database. Failed with: {err}\")", "def upload_logs():\n return {\n 'page': 'upload_logs',\n 'raw_logs': '',\n }", "def logMemoryStats():\n class MemoryStatusEx(ctypes.Structure):\n \"\"\" MEMORYSTATUSEX \"\"\"\n kaFields = [\n ( 'dwLength', ctypes.c_ulong ),\n ( 'dwMemoryLoad', ctypes.c_ulong ),\n ( 'ullTotalPhys', ctypes.c_ulonglong ),\n ( 'ullAvailPhys', ctypes.c_ulonglong ),\n ( 'ullTotalPageFile', ctypes.c_ulonglong ),\n ( 'ullAvailPageFile', ctypes.c_ulonglong ),\n ( 'ullTotalVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailExtendedVirtual', ctypes.c_ulonglong ),\n ];\n _fields_ = kaFields; # pylint: disable=invalid-name\n\n def __init__(self):\n super(MemoryStatusEx, self).__init__();\n self.dwLength = ctypes.sizeof(self);\n\n try:\n oStats = MemoryStatusEx();\n ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(oStats));\n except:\n reporter.logXcpt();\n return False;\n\n reporter.log('Memory statistics:');\n for sField, _ in MemoryStatusEx.kaFields:\n reporter.log(' %32s: %s' % (sField, getattr(oStats, sField)));\n return True;", "def get_dismount_mount_record(r, start_time):\n retv = {'state': 'M',\n 'node': r['node'],\n 'volume': r['volume'],\n 'type': r['type'],\n 'logname': r['logname'],\n 'start': start_time,\n 'finish': timeutil.wind_time(start_time, seconds=20, backward=False),\n 'storage_group': 'PROBE_UTILITY',\n 'reads': 0,\n 'writes':0\n }\n return retv", "def azure_fetch_and_push_to_influx(token, azure_data_store, file_path, tag_map, influx_settings):\n df = read_df_from_azure(azure_data_store, file_path, token)\n (host, port, user, password, db_name, batch_size) = influx_settings\n #df=df.resample('1Min').mean()\n write_to_influx(df, tag_map, host, port, user, password, db_name, batch_size)", "def test_data_store_local_new_index(tcex: TcEx):\n data = {'one': 1}\n key = str(uuid.uuid4())\n rid = 'one'\n\n ds = tcex.api.tc.v2.datastore('local', key)\n\n results = ds.add(rid=rid, data=data)\n if results is None:\n assert False, 'datastore add returned None'\n assert results.get('_id') == rid\n assert 
results.get('_shards', {}).get('successful') == 1", "def test_metering_database(self):\n self.with_deletion = self.input.param(\"delete\", False)\n self.db_name = \"%s-testmetering\" % self.db_name\n # validate initial throughput is 5000/3 = 1666\n for bucket in self.cluster.buckets:\n print(bucket.servers)\n self.assertEqual(self.bucket_util.get_throttle_limit(bucket),\n self.bucket_throttling_limit)\n\n # validate create, update, delete stat\n for op_type in [\"create\", \"update\"]:\n if op_type == \"create\":\n self.load_data(create_start=0, create_end=self.num_items, create_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if op_type == \"update\":\n self.load_data(update_start=0, update_end=self.num_items, update_perc=100, mutated=1)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if self.with_deletion:\n self.log.info(\"performing delete operation\")\n self.load_data(delete_start=0, delete_end=self.num_items, delete_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)", "def on_L3(self):\r\n self.log()", "def send_logs(self):\n for i in range(30):\n with open('{}-{}.log'.format(self._log_file_path, i), 'a') as log_file:\n for _ in range(self._log_rate):\n log_file.write(self._log_record + '\\n')", "def fetch_data_logger(fn):\n @functools.wraps(fn)\n def wrapper():\n\n data = fn()\n\n create_or_update_log(data, LOG_TYPES['D'])\n\n return data\n return wrapper", "def list_log_files(fs, devices, start_times, verbose=True, passwords={}):\n import canedge_browser\n\n log_files = []\n\n if len(start_times):\n for idx, device in enumerate(devices):\n start = start_times[idx]\n log_files_device = canedge_browser.get_log_files(fs, [device], start_date=start, passwords=passwords)\n log_files.extend(log_files_device)\n\n if verbose:\n print(f\"Found {len(log_files)} log files\\n\")\n\n return log_files", "def main():\n global tar_file_descr\n\n help_msg = 'Usage: log_collector.py <all | host1[,host2,host3...]>'\n hosts = []\n if len(sys.argv) == 2:\n if '-h' == sys.argv[1] or '--help' == sys.argv[1]:\n print(help_msg)\n sys.exit(0)\n elif 'all' == sys.argv[1]:\n # get logs from all hosts\n hosts = []\n host_objs = CLIENT.host_get_all()\n for host_obj in host_objs:\n hosts.append(host_obj.name)\n else:\n # get logs from specified hosts\n hostnames = sys.argv[1].split(',')\n for host in hostnames:\n if host not in hosts:\n hosts.append(host)\n else:\n print(help_msg)\n sys.exit(1)\n\n # open tar file for storing logs\n fd, tar_path = tempfile.mkstemp(prefix='kolla_support_logs_',\n suffix='.tgz')\n os.close(fd) # avoid fd leak\n\n with tarfile.open(tar_path, 'w:gz') as tar_file_descr:\n # clear out old logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n os.mkdir(LOGDIR)\n\n # gather logs from selected hosts\n try:\n for host in hosts:\n get_logs_from_host(host)\n\n # tar up all the container logs\n tar_file_descr.add(LOGDIR, arcname='container_logs')\n finally:\n # remove uncompressed logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n\n # gather dump output from kolla-cli\n dump_kolla_info()\n\n print('Log collection complete. 
Logs are at %s' % tar_path)", "def log_store(self) -> Optional[str]:\n return pulumi.get(self, \"log_store\")", "def __enable_log_streaming():\n # Enable BigQuery service for the project:\n run_command('gcloud services enable bigquery --project {}'.format(PROJECT_ID))\n\n # Create the BigQuery dataset to store the logs:\n # For details refer to https://cloud.google.com/bigquery/docs/datasets#bigquery-create-dataset-cli\n run_command('bq mk --data_location {} --description \\\"Cloud logging export.\\\" {}'\n .format(LOGS_LOCATION, LOGS_SINK_DATASET_ID), 'already exists')\n\n # Set up a log sink to the above-created BigQuery dataset:\n output_message = run_command('gcloud logging sinks create {} {} --project {} \\\n --log-filter=\\'resource.type=\\\"gcs_bucket\\\" OR resource.type=\\\"project\\\"\\''\n .format(LOGS_SINK_NAME, LOGS_SINK_DESTINATION, PROJECT_ID))\n\n # The service account that will be writing to BQ dataset is listed by the previous command.\n # After extracting the service account from the message, you need to give it BQ Writer role to that service account.\n __set_dataset_access( __find_service_account_in_message(output_message) )", "def get_storage_info(self, address: str, level: StorageLevel):", "def local(self, text):\n\t\tlogf = open(\"update_log.txt\", \"a\")\n\t\tdate = datetime.datetime.now()\n\t\tlogf.writelines(\"[\" + str(date) + \"] \" + text + \"\\n\")\n\t\tlogf.close()", "def log_it(logdata=None):\n with open(\"bloomhack.log\", \"a\") as fp:\n fp.write(logdata)\n return", "def node_storage(self, node):\n def listdir(path, only_dirs=False):\n ents = node.account.sftp_client.listdir(path)\n if not only_dirs:\n return ents\n paths = map(lambda fn: (fn, os.path.join(path, fn)), ents)\n return [p[0] for p in paths if node.account.isdir(p[1])]\n\n store = NodeStorage(RedpandaService.DATA_DIR)\n for ns in listdir(store.data_dir, True):\n if ns == '.coprocessor_offset_checkpoints':\n continue\n ns = store.add_namespace(ns, os.path.join(store.data_dir, ns))\n for topic in listdir(ns.path):\n topic = ns.add_topic(topic, os.path.join(ns.path, topic))\n for num in listdir(topic.path):\n partition = topic.add_partition(\n num, node, os.path.join(topic.path, num))\n partition.add_files(listdir(partition.path))\n return store", "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))", "def snapshot(snapshot_type, result_q, time_delta):", "def _log_process(ip_or_url,\n username,\n password,\n database,\n series_name,\n port=8086,\n polling_interval=1,\n retention_duration=MEASUREMENTS_RETENTION_DURATION,\n **tags):\n logger = _logger()\n logger.info('Logging GPU to Database {}'.format(ip_or_url))\n\n kwargs = {'series_name': series_name,\n 'polling_interval': polling_interval,\n 'retention_duration': retention_duration}\n kwargs.update(tags)\n p = Process(target=_start_logger_process,\n args=(ip_or_url,\n port,\n username,\n password,\n database),\n kwargs=kwargs)\n p.start()\n yield p\n p.terminate()\n p.join()", "def test_start_new_log(self):\n old_log_file_name = self.device.log_file_name\n 
self.device.start_new_log(log_name_prefix=self.get_log_suffix())\n self.assertNotEqual(\n old_log_file_name, self.device.log_file_name,\n f\"Expected log file name to change from {old_log_file_name}\")\n self.assertTrue(\n os.path.exists(old_log_file_name),\n f\"Expected old log file name {old_log_file_name} to exist\")\n self.assertTrue(\n os.path.exists(self.device.log_file_name),\n f\"Expected new log file name {self.device.log_file_name} to exist\")" ]
[ "0.6248957", "0.6004453", "0.53603053", "0.53381205", "0.5311462", "0.5291663", "0.5282635", "0.5279799", "0.52787983", "0.5260151", "0.52150124", "0.5205798", "0.5137791", "0.5104536", "0.5094829", "0.5094829", "0.5089645", "0.5080881", "0.5069531", "0.5067983", "0.5049315", "0.5047344", "0.50428", "0.50336874", "0.5028792", "0.5028792", "0.50232273", "0.5017229", "0.50001866", "0.4975056", "0.49695352", "0.49651346", "0.4959732", "0.49485096", "0.49201605", "0.491775", "0.4906063", "0.4901353", "0.48973086", "0.48958024", "0.48914644", "0.48892727", "0.48823112", "0.48804468", "0.4877903", "0.48676512", "0.48655146", "0.48638356", "0.48596066", "0.48569125", "0.48556426", "0.48419136", "0.48412684", "0.48380032", "0.48372665", "0.48348084", "0.48249245", "0.48086706", "0.4807052", "0.4804408", "0.48027894", "0.47964686", "0.47765866", "0.4768357", "0.475868", "0.47576597", "0.474328", "0.4735613", "0.47350103", "0.47318107", "0.47237858", "0.47210956", "0.47131303", "0.47130087", "0.47083464", "0.47016108", "0.47003374", "0.4697505", "0.46934873", "0.46901193", "0.4685906", "0.46849328", "0.46755195", "0.46703953", "0.46671495", "0.4667117", "0.46545303", "0.46534565", "0.46448874", "0.46371803", "0.46359396", "0.4634748", "0.4631148", "0.46283913", "0.4627094", "0.46101946", "0.45972055", "0.45956996", "0.45937267", "0.45925382", "0.45902616" ]
0.0
-1
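The document field of the preceding row implements the ModifyAuditPolicy call. The snippet below is a minimal usage sketch, not part of the dataset row itself: it assumes the generated client ships as the alibabacloud_dds20151201 package, with alibabacloud_tea_openapi and alibabacloud_tea_util providing the Config and RuntimeOptions models referenced in the code above; the endpoint, credentials, instance ID, and AuditStatus value are placeholders.

# Minimal usage sketch (assumed package layout; placeholder credentials and IDs).
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util import models as util_models
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_dds20151201.client import Client

# Build a client for the ApsaraDB for MongoDB (DDS) endpoint.
config = open_api_models.Config(
    access_key_id='<access-key-id>',          # placeholder
    access_key_secret='<access-key-secret>',  # placeholder
    endpoint='mongodb.aliyuncs.com',          # assumed public endpoint
)
client = Client(config)

# Enable the audit log on a local-disk instance and retain logs for 30 days.
request = dds_20151201_models.ModifyAuditPolicyRequest(
    dbinstance_id='dds-bp1xxxxxxxxxxxxx',  # placeholder instance ID
    audit_status='enable',                 # assumed valid AuditStatus value
    storage_period=30,
)
runtime = util_models.RuntimeOptions()
response = client.modify_audit_policy_with_options(request, runtime)
print(response.body)

Given the 30-calls-per-minute quota noted in the query field, a caller would throttle or batch these requests client-side rather than retrying in a tight loop.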
This operation is applicable only to general-purpose local-disk and dedicated local-disk instances. You can call this operation up to 30 times per minute. To call this operation at a higher frequency, use a Logstore. For more information, see [Manage a Logstore](~~48990~~).
async def modify_audit_policy_with_options_async( self, request: dds_20151201_models.ModifyAuditPolicyRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.ModifyAuditPolicyResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.audit_log_switch_source): query['AuditLogSwitchSource'] = request.audit_log_switch_source if not UtilClient.is_unset(request.audit_status): query['AuditStatus'] = request.audit_status if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token if not UtilClient.is_unset(request.service_type): query['ServiceType'] = request.service_type if not UtilClient.is_unset(request.storage_period): query['StoragePeriod'] = request.storage_period req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='ModifyAuditPolicy', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.ModifyAuditPolicyResponse(), await self.call_api_async(params, req, runtime) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def monitorStore():\n # commented to use psutil system info system_info = systeminfo.get_all_info()\n\n system_info = nodeinfo.node_all()\n system_info ['monitored_timestamp'] = config.get_current_system_timestamp()\n\n # Attach sliver info to system info\n system_info.update(sliverinfo.collectAllDataAPI())\n\n s = shelve.open('log_shelf.db', writeback = True)\n\n while(1):\n try:\n try:\n if s.has_key('current_seq_number'):\n #Update current sequence number\n s['current_seq_number']+= 1\n current_seq = s['current_seq_number']\n else:\n current_seq = 1\n s['current_seq_number']= current_seq\n\n print(\"writing to file: \" + str(current_seq))\n\n # print(\"writing to file\" + str(system_info))\n s[str(current_seq)]= system_info\n\n\n finally:\n s.close()\n break\n\n except OSError:\n # In some research devices, the underlying dbm has a bug which needs to be handled explicitly\n print(\"Exception caught while handling shelve file!! OS Error: file not found. Trying again in 1 second\")\n time.sleep(1)\n continue\n\n delete_seen_entries()", "def log(msg):\n\n print('datastore: %s' % msg)", "def disable_disk_logging():\r\n app.set_option(_DISK_LOG_LEVEL_OPTION, LogOptions._LOG_LEVEL_NONE_KEY, force=True)", "def store_stream(data: DataStream):\n if data:\n try:\n c1 = datetime.now()\n CC.save_datastream(data,\"datastream\")\n e1 = datetime.now()\n CC.save_datastream_to_influxdb(data)\n i1 = datetime.now()\n print(\"Cassandra Time: \", e1-c1, \" Influx Time: \",i1-e1, \" Batch size: \",len(data.data))\n except:\n cc_log()", "def test_Drive_log_count(opencl_env: cldrive_env.OpenCLEnvironment):\n logs = opencl_kernel_driver.Drive(\n \"\"\"\nkernel void A(global int* a, global int* b) {\n a[get_global_id(0)] += b[get_global_id(0)];\n}\n\"\"\",\n 16,\n 16,\n opencl_env,\n 5,\n )\n assert len(logs) == 5", "def disk():\n run(env.disk_usage_command % env)", "def record_used(kind, hash):\n if os.path.exists(LOG_FILEPATH):\n log = open(os.path.join(ROOT, 'used'), 'a')\n else:\n log = open(os.path.join(ROOT, 'used'), 'w+')\n\n log.writelines([\"%s...%s\\n\" % (kind, hash)])", "def syslog_local_file(handle, admin_state, name, severity=\"emergencies\",\n size=\"40000\"):\n\n from ucsmsdk.mometa.comm.CommSyslogFile import CommSyslogFile\n\n mo = CommSyslogFile(parent_mo_or_dn=\"sys/svc-ext/syslog\", size=size,\n admin_state=admin_state,\n name=name,\n severity=severity)\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n return mo", "def check_oplog(con, warning, critical, perf_data):\n warning = warning or 24 \n critical = critical or 4\n try:\n db = con.local\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) \n if (db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) != None) :\n oplog = \"oplog.rs\";\n else :\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"})\n if (db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"}) != None) :\n oplog = \"oplog.$main\";\n else :\n message = \"neither master/slave nor replica set replication detected\";\n return check_levels(None,warning,critical,message)\n\n try:\n set_read_preference(con.admin)\n data=con.local.command(pymongo.son_manipulator.SON([('collstats',oplog)]))\n except:\n data = con.admin.command(son.SON([('collstats',oplog)]))\n\n ol_size=data['size']\n ol_storage_size=data['storageSize']\n ol_used_storage=int(float(ol_size)/ol_storage_size*100+1)\n ol=con.local[oplog]\n firstc = ol.find().sort(\"$natural\",pymongo.ASCENDING).limit(1)[0]['ts']\n lastc = 
ol.find().sort(\"$natural\",pymongo.DESCENDING).limit(1)[0]['ts']\n time_in_oplog= (lastc.as_datetime()-firstc.as_datetime())\n message=\"Oplog saves \"+ str(time_in_oplog) + \" %d%% used\" %ol_used_storage \n try: #work starting from python2.7\n hours_in_oplog= time_in_oplog.total_seconds()/60/60\n except:\n hours_in_oplog= float(time_in_oplog.seconds + time_in_oplog.days * 24 * 3600)/60/60\n approx_level=hours_in_oplog*100/ol_used_storage\n message+=performance_data(perf_data,[(\"%.2f\" % hours_in_oplog,'oplog_time',warning,critical),(\"%.2f \" % approx_level, 'oplog_time_100_percent_used')])\n return check_levels(-approx_level,-warning,-critical,message)\n\n except Exception, e:\n return exit_with_general_critical(e)", "def disk_log_level():\r\n if LogOptions._DISK_LOG_LEVEL is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_LEVEL", "def get_full_log(self):\n return self._get_log('full')", "def _record_storage_event(metric, value=0):\n command_name = properties.VALUES.metrics.command_name.Get()\n metrics.CustomKeyValue(command_name, 'Storage-' + metric, value)", "def get_logdb_quota():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<show><system><logdb-quota></logdb-quota></system></show>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def test_memory_pressure(self):\n self.execute_query(self.query)\n # This triggers a full GC as of openjdk 1.8.\n call([\"jmap\", \"-histo:live\", str(self.cluster.catalogd.get_pid())])\n # Sleep for logbufsecs=5 seconds to wait for the log to be flushed. Wait 5 more\n # seconds to reduce flakiness.\n time.sleep(10)\n assert self.metadata_cache_string not in self._get_catalog_object()", "def getLogs():", "def getLogs():", "def InsertLog():", "def GetLogs(self):\n raise NotImplementedError()", "def logmem(cls, tag):\n\n cls.logger.info(\"----%s----\", str(tag))\n mem = psutil.virtual_memory()\n cls.logger.info(\"total:%s M\", mem.total / 1024 / 1024)\n cls.logger.info(\"available:%s M\", mem.available / 1024 / 1024)\n cls.logger.info(\"used:%s M\", mem.used / 1024 / 1024)\n cls.logger.info(\"free:%s M\", mem.free / 1024 / 1024)\n cls.logger.info(\"active:%s M\", mem.active / 1024 / 1024)\n cls.logger.info(\"inactive:%s M\", mem.inactive / 1024 / 1024)\n cls.logger.info(\"buffers:%s M\", mem.buffers / 1024 / 1024)\n cls.logger.info(\"cached:%s M\", mem.cached / 1024 / 1024)\n cls.logger.info(\"shared:%s M\", mem.shared / 1024 / 1024)", "def upload_crashes(self, name, directory):\n logging.info('Not uploading crashes because no Filestore.')", "def logs(lines=50):\n run(f'journalctl --lines {lines} --unit addok --follow')", "def log(self, message):\n try:\n stat = os.stat(self.logpath)\n if stat.st_size >= 1000000:\n os.rename(self.logpath, self.logpath + '.1')\n except:\n pass\n logfile = open(self.logpath, 'a+')\n logfile.write(message + \"\\n\")\n logfile.close()", "def logs(name, namespace, region, start_time, end_time, offset, failed, tail):\n\n start, end = _align_time(start_time, end_time, offset, tail)\n client = ScfLogClient(name, namespace, region, failed)\n client.fetch_log(start, end, tail)", "def objectLogger(params):\n mode = params['mode']\n maxlen = params['maxlen']\n file_format = params['format']\n home_dir = params['storage_dir']\n server = Listener((params['ip'], int(params['port'])))\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n while True:\n conn = server.accept()\n while True:\n try:\n data 
= conn.recv()\n except EOFError:\n break\n if data:\n storage.append(data)\n else:\n storage.sync(id_generator())\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n conn.close()", "def _load_disk(self):", "def _load_disk(self):", "def redire_spark_logs(bigdl_type=\"float\", log_path=os.getcwd() + \"/bigdl.log\"):\n callBigDlFunc(bigdl_type, \"redirectSparkLogs\", log_path)", "def get_device_log(self, client):\r\n file_path = client.getDeviceLog()\r\n logging.info(str(time.asctime(time.localtime())) + \" Device Logs collected !! \" + file_path)\r\n #self.logger.info(\"Device logs collected at %s\" % file_path)\r\n return file_path", "def logs(self, container: Container) -> str:", "def log_store(self) -> str:\n return pulumi.get(self, \"log_store\")", "def on_sync(self):\r\n self.log()", "def collect_storage_metrics(sys):\n try:\n session = get_session()\n client = InfluxDBClient(host=INFLUXDB_HOSTNAME, port=INFLUXDB_PORT, database=INFLUXDB_DATABASE)\n\n sys_id = sys[\"id\"]\n sys_name = get_system_name(sys)\n\n json_body = list()\n\n # Get Drive statistics\n drive_stats_list = session.get((\"{}/{}/analysed-drive-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n drive_locations = get_drive_location(sys_id, session)\n if CMD.showDriveNames:\n for stats in drive_stats_list:\n location_send = drive_locations.get(stats[\"diskId\"])\n LOG.info((\"Tray{:02.0f}, Slot{:03.0f}\").format(location_send[0], location_send[1]))\n # Add Drive statistics to json body\n for stats in drive_stats_list:\n disk_location_info = drive_locations.get(stats[\"diskId\"])\n disk_item = dict(\n measurement = \"disks\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n sys_tray = (\"{:02.0f}\").format(disk_location_info[0]),\n sys_tray_slot = (\"{:03.0f}\").format(disk_location_info[1])\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in DRIVE_PARAMS\n )\n )\n if CMD.showDriveMetrics:\n LOG.info(\"Drive payload: %s\", disk_item)\n json_body.append(disk_item)\n\n # Get interface statistics\n interface_stats_list = session.get((\"{}/{}/analysed-interface-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showInterfaceNames:\n for stats in interface_stats_list:\n LOG.info(stats[\"interfaceId\"])\n # Add interface statistics to json body\n for stats in interface_stats_list:\n if_item = dict(\n measurement = \"interface\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n interface_id = stats[\"interfaceId\"],\n channel_type = stats[\"channelType\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in INTERFACE_PARAMS\n )\n )\n if CMD.showInterfaceMetrics:\n LOG.info(\"Interface payload: %s\", if_item)\n json_body.append(if_item)\n\n # Get System statistics\n system_stats_list = session.get((\"{}/{}/analysed-system-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n # Add System statistics to json body\n sys_item = dict(\n measurement = \"systems\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name\n ),\n fields = dict(\n (metric, system_stats_list.get(metric)) for metric in SYSTEM_PARAMS\n )\n )\n if CMD.showSystemMetrics:\n LOG.info(\"System payload: %s\", sys_item)\n json_body.append(sys_item)\n \n # Get Volume statistics\n volume_stats_list = session.get((\"{}/{}/analysed-volume-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showVolumeNames:\n for stats in volume_stats_list:\n LOG.info(stats[\"volumeName\"])\n # Add Volume statistics to json body\n for stats in 
volume_stats_list:\n vol_item = dict(\n measurement = \"volumes\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n vol_name = stats[\"volumeName\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in VOLUME_PARAMS\n )\n )\n if CMD.showVolumeMetrics:\n LOG.info(\"Volume payload: %s\", vol_item)\n json_body.append(vol_item)\n\n if not CMD.doNotPost:\n client.write_points(json_body, database=INFLUXDB_DATABASE, time_precision=\"s\")\n\n except RuntimeError:\n LOG.error((\"Error when attempting to post statistics for {}/{}\").format(sys[\"name\"], sys[\"id\"]))", "def reset_used():\n with open(LOG_FILEPATH, 'w+') as logfile:\n pass", "def docker_logs():\n local('docker logs {} -f'.format(project_name))", "def test_slf_notastring():\n oldlogfile = get_logfile()\n start_logfile(1.0)\n start_logfile(True)\n set_logfile(oldlogfile)", "def set_disk_log_level(log_level):\r\n LogOptions._DISK_LOG_SCHEME, LogOptions._DISK_LOG_LEVEL = (\r\n LogOptions._parse_loglevel(log_level, scheme='google'))", "def test_slf_basic():\n oldlogfile = get_logfile()\n with tempfile.TemporaryDirectory() as tmp:\n logfile = os.path.join(tmp, 'log.txt')\n assert ~os.path.exists(logfile)\n start_logfile(logfile)\n assert os.path.exists(logfile)\n set_logfile(oldlogfile)", "def _performance_log(func):\n\n def wrapper(*arg):\n \"\"\" wrapper \"\"\"\n\n start = datetime.datetime.now()\n\n # Code execution\n res = func(*arg)\n\n if _log_performance:\n usage = resource.getrusage(resource.RUSAGE_SELF)\n memory_process = (usage[2])/1000\n\n delta = datetime.datetime.now() - start\n delta_milliseconds = int(delta.total_seconds() * 1000)\n\n _logger.info(\"PERFORMANCE - {0} - milliseconds |{1:>8,}| - memory MB |{2:>8,}|\"\n .format(func.__name__,\n delta_milliseconds,\n memory_process))\n\n return res\n\n return wrapper", "def log(length, file):\n\n if user_init.check_pre_init() and user_utility.check_drive_init() == 'True':\n \n data = user_utility.read_log(length, file)\n\n for log in data:\n print(log)\n\n\n else:\n user_utility.print_error(\"Sink folder not Found! 
Initialise folder first or reset your configuration!\")", "def logprllwrite(self):\n sql = '''select to_char(time_waited, 'FM99999999999999990') retvalue \n from v$system_event se, v$event_name en where se.event(+) \n = en.name and en.name = 'log files parallel write' '''\n self.cur.execute(sql)\n res = self.cur.fetchall()\n for i in res:\n print(i[0])", "def init_log():\n log_op = LogOp()\n log_op.config_log(logging.DEBUG)", "def logManager(self):\n time.sleep(0.1)\n while True:\n try:\n time.sleep(0.2)\n data = self.logQ.get(block=False)\n except Queue.Empty:\n pass\n else:\n try:\n self.log_lock.acquire() \n self.log_file.logEntry(data)\n time.sleep(0.1)\n self.log_lock.release()\n except:\n print '*Unable to write to log file*'", "def _load_disk(self):\r\n pass", "def startLogFiles():\n #global techLog\n global userl\n _generic_startLogFiles(True)", "def log_free_disk_space():\n cmd = 'df -h'\n p = Popen(cmd, shell=True, stdout=PIPE)\n res = p.communicate()\n if res[0]:\n res = res[0]\n else:\n res = res[1]\n logger.warning('Disk usage statisticks:')\n logger.warning(res)", "def test02StoreRefresh(self):\n s = utils.FastStore(max_size=5)\n keys = []\n for i in range(0, 5):\n keys.append(s.Put(i, i))\n\n # This should not raise because keys[0] should be refreshed each time its\n # gotten\n for i in range(0, 1000):\n s.Get(keys[0])\n s.Put(i, i)", "def log_time(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n start = time.time()\n result = func(*args, **kwargs)\n end = time.time()\n f = open(\"log_dev.txt\",'a',encoding=\"utf8\")\n time_res = end - start\n f.write(\"\\n\"+func.__name__+ \" time = \" + str(time_res))\n return result\n\n return wrapper", "def checkLogs():\n run(\"cat /etc/chariot/logs\")#not sure if this is cat-able", "def store(**kwargs):\r\n stored = AppLog(**kwargs)\r\n DBSession.add(stored)", "def test_compute_persistence(perfectModelEnsemble_initialized_control):\n perfectModelEnsemble_initialized_control._compute_persistence(metric=\"acc\")", "def collect_logs(self, log_dir, label=None, min_t=100, max_t=200):\n pass", "def test_backup_with_update_on_disk_of_snapshot_markers(self):\n version = RestConnection(self.backupset.backup_host).get_nodes_version()\n if version[:5] == \"6.5.0\":\n self.log.info(\"\\n\\n******* Due to issue in MB-36904, \\\n \\nthis test will be skipped in 6.5.0 ********\\n\")\n return\n gen1 = BlobGenerator(\"ent-backup1\", \"ent-backup-\", self.value_size, end=100000)\n gen2 = BlobGenerator(\"ent-backup2\", \"ent-backup-\", self.value_size, end=100000)\n gen3 = BlobGenerator(\"ent-backup3\", \"ent-backup-\", self.value_size, end=100000)\n rest_conn = RestConnection(self.backupset.cluster_host)\n rest_conn.create_bucket(bucket=\"bucket0\", ramQuotaMB=1024)\n self.buckets = rest_conn.get_buckets()\n authentication = \"-u Administrator -p password\"\n\n self._load_all_buckets(self.master, gen1, \"create\", 0)\n self.log.info(\"Stop persistent\")\n cluster_nodes = rest_conn.get_nodes()\n clusters = copy.deepcopy(cluster_nodes)\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n for node in clusters:\n shell.execute_command(\"%scbepctl%s %s:11210 -b %s stop %s\" % \\\n (self.cli_command_location,\n self.cmd_ext,\n node.ip,\n \"bucket0\",\n authentication))\n shell.disconnect()\n self.log.info(\"Load 2nd batch docs\")\n self._load_all_buckets(self.master, gen2, \"create\", 0)\n self.log.info(\"Run full backup with cbbackupwrapper\")\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n backup_dir = 
self.tmp_path + \"backup\" + self.master.ip\n shell.execute_command(\"rm -rf %s\" % backup_dir)\n shell.execute_command(\"mkdir %s\" % backup_dir)\n shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m full %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n self.log.info(\"Load 3rd batch docs\")\n self._load_all_buckets(self.master, gen3, \"create\", 0)\n self.log.info(\"Run diff backup with cbbackupwrapper\")\n output, _ = shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m diff %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n\n if output and \"SUCCESSFULLY COMPLETED\" not in output[1]:\n self.fail(\"Failed to backup as the fix in MB-25727\")\n shell.disconnect()", "def test_errors_on_log(self):\n mb = self.maria_backup\n self.assertFalse(mb.errors_on_log()) # at the moment, xtrabackup execution does not generate disk logs", "def add_log(self, log):\n log = str(datetime.datetime.now()) + \": \"+log+\"\\n\"\n print(log)\n self.logs.append(log)\n if len(self.logs) > 10:\n self.append_to_logfile()", "def log_scalar(name, value, step, autolog):\n if not autolog:\n mlflow.log_metric(name, value)", "def recordLogsToList(log):\n print log\n# global LOGLIST\n LOGLIST.append(log)", "def on_L1(self):\r\n self.log()", "def on_L2(self):\r\n self.log()", "def log_all(self):\n self.save_raw()\n self.log()", "def logs_directory(self):", "def test_log_links(self):\n self.host = synthetic_host(\"myserver-with-nids\", [Nid.Nid(\"192.168.0.1\", \"tcp\", 0)])\n self.create_simple_filesystem(self.host)\n fake_log_message(\"192.168.0.1@tcp testfs-MDT0000\")\n response = self.api_client.get(\"/api/log/\")\n (event,) = self.deserialize(response)[\"objects\"]\n self.assertEqual(len(event[\"substitutions\"]), 2)\n self.host.state = \"removed\"\n self.host.save()\n self.mdt.not_deleted = False\n self.mdt.save()\n response = self.api_client.get(\"/api/log/\")\n (event,) = self.deserialize(response)[\"objects\"]\n self.assertEqual(len(event[\"substitutions\"]), 0)", "def log(string):\n\n print string\n\n# data and time\n dt = datetime.now().strftime(\"%b %d %H:%M:%S\")\n\n# check if log file exist / if not create it\n check_logf = os.path.isfile(logfile)\n if check_logf == False:\n os.system(\"touch %s\" % (logfile))\n firstlog = \"%s %s jadm: jadm log file was created!\" % (dt, os.uname()[1])\n os.system(\"echo '%s' > %s\" % (firstlog, logfile))\n\n# applay string to log file\n string = \"%s %s jadm: %s\" % (dt, os.uname()[1], string)\n os.system(\"echo '%s' >> %s\" % (string, logfile))", "def test_data_store_local_index(self, tcex: TcEx):\n tcex.api.tc.v2.datastore('local', self.data_type)", "def createLogicalVolume(self, vg, filesystem, name, mountpoint, size):\n lv = {}\n lv['command'] = 'create:logvol'\n lv['vg'] = vg\n lv['fs'] = filesystem\n lv['size'] = size - EXTENT_SIZE + 1\n lv['name'] = name\n lv['mountPoint'] = mountpoint\n lv['format'] = 'yes'\n\n return lv", "def test_cbbackupmgr_collect_logs(self):\n if \"5.5\" > self.cb_version[:3]:\n self.fail(\"This test is only for cb version 5.5 and later. 
\")\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster()\n self._collect_logs()", "def disk_log_scheme():\r\n if LogOptions._DISK_LOG_SCHEME is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_SCHEME", "def log_runtime(label, mean_time, std, instances):\n pass", "def do_disk_event(client, args):\n args.type = 'disk'\n do_event_show(client, args)", "def show_bigdl_info_logs(bigdl_type=\"float\"):\n callBigDlFunc(bigdl_type, \"showBigDlInfoLogs\")", "def logs(self, **options):\n cmd = self.get_scalingo_prefix_cmd()\n cmd += \" logs -f\"\n for name, val in options.items():\n if len(name) == 1:\n cmd += f\" -{name}\"\n else:\n cmd += f\" --{name}\"\n if val:\n cmd += f\" {val}\"\n if self.env_name == \"local\":\n raise ValueError(\"No logs on local environment\")\n else:\n asyncio.run(self.async_run(cmd))", "def store_data(batch_df: DataFrame, output_delta_lake_path):\n batch_df.select(col(\"MeterId\"),\n col(\"SupplierId\"),\n col(\"Measurement\"),\n col(\"ObservationTime\")) \\\n .repartition(\"SupplierId\") \\\n .write \\\n .partitionBy(\"SupplierId\") \\\n .format(\"delta\") \\\n .mode(\"append\") \\\n .save(output_delta_lake_path)", "def logentry(jobid, label, typ, content=None, path=None):\n ud = str(uuid.uuid4())\n db.logs.save({\"uuid\":ud, \"jobid\":jobid, \"label\":label, \"type\":typ, \"content\":content, \"date\":tstamp(), \"containerurl\":path})", "def load(lb_id, backend='memory'):\n return b_api.fetch(backend).logbook_get(lb_id)", "def dump_to_log(self):\n # self._send_request(\"/dumpToLog\")\n pass", "def store(status, message, **kwargs):\r\n LogRecord(status, message, **kwargs)", "def log_mounts_info(mounts):\n if isinstance(mounts, str):\n mounts = [mounts]\n\n g.log.info(\"Start logging mounts information:\")\n for mount_obj in mounts:\n g.log.info(\"Information of mount %s:%s\", mount_obj.client_system,\n mount_obj.mountpoint)\n # Mount Info\n g.log.info(\"Look For Mountpoint:\\n\")\n cmd = \"mount | grep %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Disk Space Usage\n g.log.info(\"Disk Space Usage Of Mountpoint:\\n\")\n cmd = \"df -h %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Long list the mountpoint\n g.log.info(\"List Mountpoint Entries:\\n\")\n cmd = \"ls -ld %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Stat mountpoint\n g.log.info(\"Mountpoint Status:\\n\")\n cmd = \"stat %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)", "def putlog(self,s):\n if not self.logqueue == None:\n# print s\n self.logqueue.put(\"Spectrum: (\"+time.ctime()+\"):\\n\"+s)", "def set_size_based_rotating_log(log_path=LOGGER_PATH, log_name=LOGGER_FILENAME):\n\n # Checks if the path exists otherwise tries to create it\n if not os.path.exists(log_path):\n try:\n os.makedirs(log_path)\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n print(exc_type, exc_value, exc_traceback)\n\n # Sets the format and level of logging records\n format = '%(asctime)s - %(levelname)s - [%(pathname)s:%(lineno)s - %(funcName)10s() ] - %(message)s'\n formatter = Formatter(format)\n level=logging.DEBUG\n\n # Does basic configuration for the logging system\n logging.basicConfig(format=format, level=level)\n\n # Instantiates a logger object\n logger 
= logging.getLogger(\"\")\n\n handler = logging.handlers.RotatingFileHandler(filename=log_path+'/'+log_name,\n maxBytes=LOGGER_MAX_SIZE,\n backupCount=LOGGER_MAX_FILES)\n handler.setFormatter(formatter)\n handler.rotator = rotator\n handler.namer = namer\n logger.addHandler(handler)\n return logger", "def test_logging_log_rotate_for_mysql(\n self,\n admin_node,\n k8s_actions,\n show_step,\n os_deployed):\n logger.info('Log rotate for mysql')\n log_path = '/var/log/ccp/mysql/'\n log_file = 'mysql.log'\n\n show_step(1)\n # get cron pod\n cron_pod = [pod for pod in k8s_actions.api.pods.list(\n namespace=ext.Namespace.BASE_NAMESPACE)\n if 'cron-' in pod.name][0]\n # clean files\n utils.rm_files(admin_node, cron_pod, log_path + log_file + '*')\n\n show_step(2)\n for day in range(0, 8):\n utils.create_file(admin_node, cron_pod, log_path + log_file, 110)\n utils.run_daily_cron(admin_node, cron_pod, 'logrotate')\n sleep(5)\n\n show_step(3)\n log_files = utils.list_files(\n admin_node, cron_pod, log_path, log_file + '*')\n assert len(log_files) == 7,\\\n \"Count of log files after rotation is wrong. \" \\\n \"Expected {} Actual {}\".format(log_files, 7)", "def log(cmdDict, kind, error=None):\n\n cmdDict['timestamp'] = str(int(time.time() * 1000))\n cmdDict['logType'] = kind\n cmdDict['_id'] = cmdDict['transactionNumber'] + cmdDict['user'] + cmdDict['command'] + cmdDict['timestamp'] + cmdDict['logType']\n\n if (error != None):\n cmdDict['errorMessage'] = error\n\n try:\n Database.insert(TRANSACT_COLLECT, cmdDict)\n\n except pymongo.errors.PyMongoError as err:\n print(f\"ERROR! Could not log command into database. Failed with: {err}\")", "def upload_logs():\n return {\n 'page': 'upload_logs',\n 'raw_logs': '',\n }", "def logMemoryStats():\n class MemoryStatusEx(ctypes.Structure):\n \"\"\" MEMORYSTATUSEX \"\"\"\n kaFields = [\n ( 'dwLength', ctypes.c_ulong ),\n ( 'dwMemoryLoad', ctypes.c_ulong ),\n ( 'ullTotalPhys', ctypes.c_ulonglong ),\n ( 'ullAvailPhys', ctypes.c_ulonglong ),\n ( 'ullTotalPageFile', ctypes.c_ulonglong ),\n ( 'ullAvailPageFile', ctypes.c_ulonglong ),\n ( 'ullTotalVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailExtendedVirtual', ctypes.c_ulonglong ),\n ];\n _fields_ = kaFields; # pylint: disable=invalid-name\n\n def __init__(self):\n super(MemoryStatusEx, self).__init__();\n self.dwLength = ctypes.sizeof(self);\n\n try:\n oStats = MemoryStatusEx();\n ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(oStats));\n except:\n reporter.logXcpt();\n return False;\n\n reporter.log('Memory statistics:');\n for sField, _ in MemoryStatusEx.kaFields:\n reporter.log(' %32s: %s' % (sField, getattr(oStats, sField)));\n return True;", "def get_dismount_mount_record(r, start_time):\n retv = {'state': 'M',\n 'node': r['node'],\n 'volume': r['volume'],\n 'type': r['type'],\n 'logname': r['logname'],\n 'start': start_time,\n 'finish': timeutil.wind_time(start_time, seconds=20, backward=False),\n 'storage_group': 'PROBE_UTILITY',\n 'reads': 0,\n 'writes':0\n }\n return retv", "def azure_fetch_and_push_to_influx(token, azure_data_store, file_path, tag_map, influx_settings):\n df = read_df_from_azure(azure_data_store, file_path, token)\n (host, port, user, password, db_name, batch_size) = influx_settings\n #df=df.resample('1Min').mean()\n write_to_influx(df, tag_map, host, port, user, password, db_name, batch_size)", "def test_metering_database(self):\n self.with_deletion = self.input.param(\"delete\", False)\n self.db_name = \"%s-testmetering\" % 
self.db_name\n # validate initial throughput is 5000/3 = 1666\n for bucket in self.cluster.buckets:\n print(bucket.servers)\n self.assertEqual(self.bucket_util.get_throttle_limit(bucket),\n self.bucket_throttling_limit)\n\n # validate create, update, delete stat\n for op_type in [\"create\", \"update\"]:\n if op_type == \"create\":\n self.load_data(create_start=0, create_end=self.num_items, create_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if op_type == \"update\":\n self.load_data(update_start=0, update_end=self.num_items, update_perc=100, mutated=1)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if self.with_deletion:\n self.log.info(\"performing delete operation\")\n self.load_data(delete_start=0, delete_end=self.num_items, delete_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)", "def test_data_store_local_new_index(tcex: TcEx):\n data = {'one': 1}\n key = str(uuid.uuid4())\n rid = 'one'\n\n ds = tcex.api.tc.v2.datastore('local', key)\n\n results = ds.add(rid=rid, data=data)\n if results is None:\n assert False, 'datastore add returned None'\n assert results.get('_id') == rid\n assert results.get('_shards', {}).get('successful') == 1", "def send_logs(self):\n for i in range(30):\n with open('{}-{}.log'.format(self._log_file_path, i), 'a') as log_file:\n for _ in range(self._log_rate):\n log_file.write(self._log_record + '\\n')", "def on_L3(self):\r\n self.log()", "def fetch_data_logger(fn):\n @functools.wraps(fn)\n def wrapper():\n\n data = fn()\n\n create_or_update_log(data, LOG_TYPES['D'])\n\n return data\n return wrapper", "def list_log_files(fs, devices, start_times, verbose=True, passwords={}):\n import canedge_browser\n\n log_files = []\n\n if len(start_times):\n for idx, device in enumerate(devices):\n start = start_times[idx]\n log_files_device = canedge_browser.get_log_files(fs, [device], start_date=start, passwords=passwords)\n log_files.extend(log_files_device)\n\n if verbose:\n print(f\"Found {len(log_files)} log files\\n\")\n\n return log_files", "def main():\n global tar_file_descr\n\n help_msg = 'Usage: log_collector.py <all | host1[,host2,host3...]>'\n hosts = []\n if len(sys.argv) == 2:\n if '-h' == sys.argv[1] or '--help' == sys.argv[1]:\n print(help_msg)\n sys.exit(0)\n elif 'all' == sys.argv[1]:\n # get logs from all hosts\n hosts = []\n host_objs = CLIENT.host_get_all()\n for host_obj in host_objs:\n hosts.append(host_obj.name)\n else:\n # get logs from specified hosts\n hostnames = sys.argv[1].split(',')\n for host in hostnames:\n if host not in hosts:\n hosts.append(host)\n else:\n print(help_msg)\n sys.exit(1)\n\n # open tar file for storing logs\n fd, tar_path = tempfile.mkstemp(prefix='kolla_support_logs_',\n suffix='.tgz')\n os.close(fd) # avoid fd leak\n\n with tarfile.open(tar_path, 'w:gz') as tar_file_descr:\n # clear out old logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n os.mkdir(LOGDIR)\n\n # gather logs from selected hosts\n try:\n for host in hosts:\n get_logs_from_host(host)\n\n # tar up all the container logs\n tar_file_descr.add(LOGDIR, arcname='container_logs')\n finally:\n # remove uncompressed logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n\n # gather dump output from kolla-cli\n dump_kolla_info()\n\n print('Log collection complete. 
Logs are at %s' % tar_path)", "def log_store(self) -> Optional[str]:\n return pulumi.get(self, \"log_store\")", "def __enable_log_streaming():\n # Enable BigQuery service for the project:\n run_command('gcloud services enable bigquery --project {}'.format(PROJECT_ID))\n\n # Create the BigQuery dataset to store the logs:\n # For details refer to https://cloud.google.com/bigquery/docs/datasets#bigquery-create-dataset-cli\n run_command('bq mk --data_location {} --description \\\"Cloud logging export.\\\" {}'\n .format(LOGS_LOCATION, LOGS_SINK_DATASET_ID), 'already exists')\n\n # Set up a log sink to the above-created BigQuery dataset:\n output_message = run_command('gcloud logging sinks create {} {} --project {} \\\n --log-filter=\\'resource.type=\\\"gcs_bucket\\\" OR resource.type=\\\"project\\\"\\''\n .format(LOGS_SINK_NAME, LOGS_SINK_DESTINATION, PROJECT_ID))\n\n # The service account that will be writing to BQ dataset is listed by the previous command.\n # After extracting the service account from the message, you need to give it BQ Writer role to that service account.\n __set_dataset_access( __find_service_account_in_message(output_message) )", "def get_storage_info(self, address: str, level: StorageLevel):", "def local(self, text):\n\t\tlogf = open(\"update_log.txt\", \"a\")\n\t\tdate = datetime.datetime.now()\n\t\tlogf.writelines(\"[\" + str(date) + \"] \" + text + \"\\n\")\n\t\tlogf.close()", "def log_it(logdata=None):\n with open(\"bloomhack.log\", \"a\") as fp:\n fp.write(logdata)\n return", "def node_storage(self, node):\n def listdir(path, only_dirs=False):\n ents = node.account.sftp_client.listdir(path)\n if not only_dirs:\n return ents\n paths = map(lambda fn: (fn, os.path.join(path, fn)), ents)\n return [p[0] for p in paths if node.account.isdir(p[1])]\n\n store = NodeStorage(RedpandaService.DATA_DIR)\n for ns in listdir(store.data_dir, True):\n if ns == '.coprocessor_offset_checkpoints':\n continue\n ns = store.add_namespace(ns, os.path.join(store.data_dir, ns))\n for topic in listdir(ns.path):\n topic = ns.add_topic(topic, os.path.join(ns.path, topic))\n for num in listdir(topic.path):\n partition = topic.add_partition(\n num, node, os.path.join(topic.path, num))\n partition.add_files(listdir(partition.path))\n return store", "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))", "def snapshot(snapshot_type, result_q, time_delta):", "def _log_process(ip_or_url,\n username,\n password,\n database,\n series_name,\n port=8086,\n polling_interval=1,\n retention_duration=MEASUREMENTS_RETENTION_DURATION,\n **tags):\n logger = _logger()\n logger.info('Logging GPU to Database {}'.format(ip_or_url))\n\n kwargs = {'series_name': series_name,\n 'polling_interval': polling_interval,\n 'retention_duration': retention_duration}\n kwargs.update(tags)\n p = Process(target=_start_logger_process,\n args=(ip_or_url,\n port,\n username,\n password,\n database),\n kwargs=kwargs)\n p.start()\n yield p\n p.terminate()\n p.join()", "def test_start_new_log(self):\n old_log_file_name = self.device.log_file_name\n 
self.device.start_new_log(log_name_prefix=self.get_log_suffix())\n self.assertNotEqual(\n old_log_file_name, self.device.log_file_name,\n f\"Expected log file name to change from {old_log_file_name}\")\n self.assertTrue(\n os.path.exists(old_log_file_name),\n f\"Expected old log file name {old_log_file_name} to exist\")\n self.assertTrue(\n os.path.exists(self.device.log_file_name),\n f\"Expected new log file name {self.device.log_file_name} to exist\")" ]
[ "0.6249917", "0.600348", "0.5359398", "0.5338779", "0.5310546", "0.52928746", "0.5282921", "0.5279352", "0.5279001", "0.52591884", "0.521372", "0.52056307", "0.5136519", "0.5106076", "0.5093585", "0.5093585", "0.508893", "0.50791097", "0.507007", "0.5068071", "0.50492823", "0.5047607", "0.5041982", "0.5033806", "0.50300545", "0.50300545", "0.50220966", "0.50161654", "0.49987411", "0.4973591", "0.49686015", "0.4965938", "0.49597555", "0.4947962", "0.4918784", "0.4917084", "0.49052343", "0.49005905", "0.48973042", "0.48950753", "0.48905793", "0.48889422", "0.48816362", "0.48815823", "0.48786694", "0.48686954", "0.4864698", "0.48646566", "0.4858941", "0.48564273", "0.485455", "0.4843747", "0.48412818", "0.48370776", "0.48358908", "0.4833253", "0.48229158", "0.4806377", "0.4805858", "0.48033783", "0.48021865", "0.47964823", "0.47750932", "0.4768405", "0.4758163", "0.4757573", "0.47427362", "0.4735837", "0.4733725", "0.47297028", "0.47232515", "0.47209674", "0.47128278", "0.47117206", "0.47072574", "0.47022194", "0.46999288", "0.46977976", "0.46937203", "0.46901488", "0.4685285", "0.4685246", "0.46755475", "0.4670844", "0.46674314", "0.46657613", "0.4653522", "0.46523577", "0.46428913", "0.46363285", "0.46357659", "0.46331036", "0.46296275", "0.46278575", "0.4626488", "0.46094543", "0.45972398", "0.45966548", "0.45935205", "0.459188", "0.4590052" ]
0.0
-1
This operation is applicable only to general-purpose local-disk and dedicated local-disk instances. You can call this operation up to 30 times per minute. To call this operation at a higher frequency, use a Logstore. For more information, see [Manage a Logstore](~~48990~~).
def modify_audit_policy( self, request: dds_20151201_models.ModifyAuditPolicyRequest, ) -> dds_20151201_models.ModifyAuditPolicyResponse: runtime = util_models.RuntimeOptions() return self.modify_audit_policy_with_options(request, runtime)
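The synchronous wrapper above simply builds default RuntimeOptions and delegates to modify_audit_policy_with_options, so a blocking call needs only the request object. A minimal sketch under the same assumptions as the async example (the client is an already-constructed DDS client; the AuditStatus value is illustrative):

from alibabacloud_dds20151201 import models as dds_20151201_models


def disable_audit_log(client, instance_id: str):
    # 'client' is an already-constructed DDS client (see the async sketch above).
    request = dds_20151201_models.ModifyAuditPolicyRequest(
        dbinstance_id=instance_id,
        audit_status='disabled',  # assumed valid value; see the API reference
    )
    # Default RuntimeOptions are supplied inside the wrapper.
    return client.modify_audit_policy(request)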
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def monitorStore():\n # commented to use psutil system info system_info = systeminfo.get_all_info()\n\n system_info = nodeinfo.node_all()\n system_info ['monitored_timestamp'] = config.get_current_system_timestamp()\n\n # Attach sliver info to system info\n system_info.update(sliverinfo.collectAllDataAPI())\n\n s = shelve.open('log_shelf.db', writeback = True)\n\n while(1):\n try:\n try:\n if s.has_key('current_seq_number'):\n #Update current sequence number\n s['current_seq_number']+= 1\n current_seq = s['current_seq_number']\n else:\n current_seq = 1\n s['current_seq_number']= current_seq\n\n print(\"writing to file: \" + str(current_seq))\n\n # print(\"writing to file\" + str(system_info))\n s[str(current_seq)]= system_info\n\n\n finally:\n s.close()\n break\n\n except OSError:\n # In some research devices, the underlying dbm has a bug which needs to be handled explicitly\n print(\"Exception caught while handling shelve file!! OS Error: file not found. Trying again in 1 second\")\n time.sleep(1)\n continue\n\n delete_seen_entries()", "def log(msg):\n\n print('datastore: %s' % msg)", "def disable_disk_logging():\r\n app.set_option(_DISK_LOG_LEVEL_OPTION, LogOptions._LOG_LEVEL_NONE_KEY, force=True)", "def store_stream(data: DataStream):\n if data:\n try:\n c1 = datetime.now()\n CC.save_datastream(data,\"datastream\")\n e1 = datetime.now()\n CC.save_datastream_to_influxdb(data)\n i1 = datetime.now()\n print(\"Cassandra Time: \", e1-c1, \" Influx Time: \",i1-e1, \" Batch size: \",len(data.data))\n except:\n cc_log()", "def test_Drive_log_count(opencl_env: cldrive_env.OpenCLEnvironment):\n logs = opencl_kernel_driver.Drive(\n \"\"\"\nkernel void A(global int* a, global int* b) {\n a[get_global_id(0)] += b[get_global_id(0)];\n}\n\"\"\",\n 16,\n 16,\n opencl_env,\n 5,\n )\n assert len(logs) == 5", "def disk():\n run(env.disk_usage_command % env)", "def record_used(kind, hash):\n if os.path.exists(LOG_FILEPATH):\n log = open(os.path.join(ROOT, 'used'), 'a')\n else:\n log = open(os.path.join(ROOT, 'used'), 'w+')\n\n log.writelines([\"%s...%s\\n\" % (kind, hash)])", "def syslog_local_file(handle, admin_state, name, severity=\"emergencies\",\n size=\"40000\"):\n\n from ucsmsdk.mometa.comm.CommSyslogFile import CommSyslogFile\n\n mo = CommSyslogFile(parent_mo_or_dn=\"sys/svc-ext/syslog\", size=size,\n admin_state=admin_state,\n name=name,\n severity=severity)\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n return mo", "def check_oplog(con, warning, critical, perf_data):\n warning = warning or 24 \n critical = critical or 4\n try:\n db = con.local\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) \n if (db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) != None) :\n oplog = \"oplog.rs\";\n else :\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"})\n if (db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"}) != None) :\n oplog = \"oplog.$main\";\n else :\n message = \"neither master/slave nor replica set replication detected\";\n return check_levels(None,warning,critical,message)\n\n try:\n set_read_preference(con.admin)\n data=con.local.command(pymongo.son_manipulator.SON([('collstats',oplog)]))\n except:\n data = con.admin.command(son.SON([('collstats',oplog)]))\n\n ol_size=data['size']\n ol_storage_size=data['storageSize']\n ol_used_storage=int(float(ol_size)/ol_storage_size*100+1)\n ol=con.local[oplog]\n firstc = ol.find().sort(\"$natural\",pymongo.ASCENDING).limit(1)[0]['ts']\n lastc = 
ol.find().sort(\"$natural\",pymongo.DESCENDING).limit(1)[0]['ts']\n time_in_oplog= (lastc.as_datetime()-firstc.as_datetime())\n message=\"Oplog saves \"+ str(time_in_oplog) + \" %d%% used\" %ol_used_storage \n try: #work starting from python2.7\n hours_in_oplog= time_in_oplog.total_seconds()/60/60\n except:\n hours_in_oplog= float(time_in_oplog.seconds + time_in_oplog.days * 24 * 3600)/60/60\n approx_level=hours_in_oplog*100/ol_used_storage\n message+=performance_data(perf_data,[(\"%.2f\" % hours_in_oplog,'oplog_time',warning,critical),(\"%.2f \" % approx_level, 'oplog_time_100_percent_used')])\n return check_levels(-approx_level,-warning,-critical,message)\n\n except Exception, e:\n return exit_with_general_critical(e)", "def disk_log_level():\r\n if LogOptions._DISK_LOG_LEVEL is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_LEVEL", "def get_full_log(self):\n return self._get_log('full')", "def _record_storage_event(metric, value=0):\n command_name = properties.VALUES.metrics.command_name.Get()\n metrics.CustomKeyValue(command_name, 'Storage-' + metric, value)", "def get_logdb_quota():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<show><system><logdb-quota></logdb-quota></system></show>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def test_memory_pressure(self):\n self.execute_query(self.query)\n # This triggers a full GC as of openjdk 1.8.\n call([\"jmap\", \"-histo:live\", str(self.cluster.catalogd.get_pid())])\n # Sleep for logbufsecs=5 seconds to wait for the log to be flushed. Wait 5 more\n # seconds to reduce flakiness.\n time.sleep(10)\n assert self.metadata_cache_string not in self._get_catalog_object()", "def getLogs():", "def getLogs():", "def InsertLog():", "def GetLogs(self):\n raise NotImplementedError()", "def logmem(cls, tag):\n\n cls.logger.info(\"----%s----\", str(tag))\n mem = psutil.virtual_memory()\n cls.logger.info(\"total:%s M\", mem.total / 1024 / 1024)\n cls.logger.info(\"available:%s M\", mem.available / 1024 / 1024)\n cls.logger.info(\"used:%s M\", mem.used / 1024 / 1024)\n cls.logger.info(\"free:%s M\", mem.free / 1024 / 1024)\n cls.logger.info(\"active:%s M\", mem.active / 1024 / 1024)\n cls.logger.info(\"inactive:%s M\", mem.inactive / 1024 / 1024)\n cls.logger.info(\"buffers:%s M\", mem.buffers / 1024 / 1024)\n cls.logger.info(\"cached:%s M\", mem.cached / 1024 / 1024)\n cls.logger.info(\"shared:%s M\", mem.shared / 1024 / 1024)", "def upload_crashes(self, name, directory):\n logging.info('Not uploading crashes because no Filestore.')", "def logs(lines=50):\n run(f'journalctl --lines {lines} --unit addok --follow')", "def log(self, message):\n try:\n stat = os.stat(self.logpath)\n if stat.st_size >= 1000000:\n os.rename(self.logpath, self.logpath + '.1')\n except:\n pass\n logfile = open(self.logpath, 'a+')\n logfile.write(message + \"\\n\")\n logfile.close()", "def logs(name, namespace, region, start_time, end_time, offset, failed, tail):\n\n start, end = _align_time(start_time, end_time, offset, tail)\n client = ScfLogClient(name, namespace, region, failed)\n client.fetch_log(start, end, tail)", "def objectLogger(params):\n mode = params['mode']\n maxlen = params['maxlen']\n file_format = params['format']\n home_dir = params['storage_dir']\n server = Listener((params['ip'], int(params['port'])))\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n while True:\n conn = server.accept()\n while True:\n try:\n data 
= conn.recv()\n except EOFError:\n break\n if data:\n storage.append(data)\n else:\n storage.sync(id_generator())\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n conn.close()", "def _load_disk(self):", "def _load_disk(self):", "def redire_spark_logs(bigdl_type=\"float\", log_path=os.getcwd() + \"/bigdl.log\"):\n callBigDlFunc(bigdl_type, \"redirectSparkLogs\", log_path)", "def get_device_log(self, client):\r\n file_path = client.getDeviceLog()\r\n logging.info(str(time.asctime(time.localtime())) + \" Device Logs collected !! \" + file_path)\r\n #self.logger.info(\"Device logs collected at %s\" % file_path)\r\n return file_path", "def logs(self, container: Container) -> str:", "def log_store(self) -> str:\n return pulumi.get(self, \"log_store\")", "def on_sync(self):\r\n self.log()", "def collect_storage_metrics(sys):\n try:\n session = get_session()\n client = InfluxDBClient(host=INFLUXDB_HOSTNAME, port=INFLUXDB_PORT, database=INFLUXDB_DATABASE)\n\n sys_id = sys[\"id\"]\n sys_name = get_system_name(sys)\n\n json_body = list()\n\n # Get Drive statistics\n drive_stats_list = session.get((\"{}/{}/analysed-drive-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n drive_locations = get_drive_location(sys_id, session)\n if CMD.showDriveNames:\n for stats in drive_stats_list:\n location_send = drive_locations.get(stats[\"diskId\"])\n LOG.info((\"Tray{:02.0f}, Slot{:03.0f}\").format(location_send[0], location_send[1]))\n # Add Drive statistics to json body\n for stats in drive_stats_list:\n disk_location_info = drive_locations.get(stats[\"diskId\"])\n disk_item = dict(\n measurement = \"disks\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n sys_tray = (\"{:02.0f}\").format(disk_location_info[0]),\n sys_tray_slot = (\"{:03.0f}\").format(disk_location_info[1])\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in DRIVE_PARAMS\n )\n )\n if CMD.showDriveMetrics:\n LOG.info(\"Drive payload: %s\", disk_item)\n json_body.append(disk_item)\n\n # Get interface statistics\n interface_stats_list = session.get((\"{}/{}/analysed-interface-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showInterfaceNames:\n for stats in interface_stats_list:\n LOG.info(stats[\"interfaceId\"])\n # Add interface statistics to json body\n for stats in interface_stats_list:\n if_item = dict(\n measurement = \"interface\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n interface_id = stats[\"interfaceId\"],\n channel_type = stats[\"channelType\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in INTERFACE_PARAMS\n )\n )\n if CMD.showInterfaceMetrics:\n LOG.info(\"Interface payload: %s\", if_item)\n json_body.append(if_item)\n\n # Get System statistics\n system_stats_list = session.get((\"{}/{}/analysed-system-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n # Add System statistics to json body\n sys_item = dict(\n measurement = \"systems\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name\n ),\n fields = dict(\n (metric, system_stats_list.get(metric)) for metric in SYSTEM_PARAMS\n )\n )\n if CMD.showSystemMetrics:\n LOG.info(\"System payload: %s\", sys_item)\n json_body.append(sys_item)\n \n # Get Volume statistics\n volume_stats_list = session.get((\"{}/{}/analysed-volume-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showVolumeNames:\n for stats in volume_stats_list:\n LOG.info(stats[\"volumeName\"])\n # Add Volume statistics to json body\n for stats in 
volume_stats_list:\n vol_item = dict(\n measurement = \"volumes\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n vol_name = stats[\"volumeName\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in VOLUME_PARAMS\n )\n )\n if CMD.showVolumeMetrics:\n LOG.info(\"Volume payload: %s\", vol_item)\n json_body.append(vol_item)\n\n if not CMD.doNotPost:\n client.write_points(json_body, database=INFLUXDB_DATABASE, time_precision=\"s\")\n\n except RuntimeError:\n LOG.error((\"Error when attempting to post statistics for {}/{}\").format(sys[\"name\"], sys[\"id\"]))", "def reset_used():\n with open(LOG_FILEPATH, 'w+') as logfile:\n pass", "def docker_logs():\n local('docker logs {} -f'.format(project_name))", "def test_slf_notastring():\n oldlogfile = get_logfile()\n start_logfile(1.0)\n start_logfile(True)\n set_logfile(oldlogfile)", "def set_disk_log_level(log_level):\r\n LogOptions._DISK_LOG_SCHEME, LogOptions._DISK_LOG_LEVEL = (\r\n LogOptions._parse_loglevel(log_level, scheme='google'))", "def test_slf_basic():\n oldlogfile = get_logfile()\n with tempfile.TemporaryDirectory() as tmp:\n logfile = os.path.join(tmp, 'log.txt')\n assert ~os.path.exists(logfile)\n start_logfile(logfile)\n assert os.path.exists(logfile)\n set_logfile(oldlogfile)", "def _performance_log(func):\n\n def wrapper(*arg):\n \"\"\" wrapper \"\"\"\n\n start = datetime.datetime.now()\n\n # Code execution\n res = func(*arg)\n\n if _log_performance:\n usage = resource.getrusage(resource.RUSAGE_SELF)\n memory_process = (usage[2])/1000\n\n delta = datetime.datetime.now() - start\n delta_milliseconds = int(delta.total_seconds() * 1000)\n\n _logger.info(\"PERFORMANCE - {0} - milliseconds |{1:>8,}| - memory MB |{2:>8,}|\"\n .format(func.__name__,\n delta_milliseconds,\n memory_process))\n\n return res\n\n return wrapper", "def log(length, file):\n\n if user_init.check_pre_init() and user_utility.check_drive_init() == 'True':\n \n data = user_utility.read_log(length, file)\n\n for log in data:\n print(log)\n\n\n else:\n user_utility.print_error(\"Sink folder not Found! 
Initialise folder first or reset your configuration!\")", "def logprllwrite(self):\n sql = '''select to_char(time_waited, 'FM99999999999999990') retvalue \n from v$system_event se, v$event_name en where se.event(+) \n = en.name and en.name = 'log files parallel write' '''\n self.cur.execute(sql)\n res = self.cur.fetchall()\n for i in res:\n print(i[0])", "def init_log():\n log_op = LogOp()\n log_op.config_log(logging.DEBUG)", "def logManager(self):\n time.sleep(0.1)\n while True:\n try:\n time.sleep(0.2)\n data = self.logQ.get(block=False)\n except Queue.Empty:\n pass\n else:\n try:\n self.log_lock.acquire() \n self.log_file.logEntry(data)\n time.sleep(0.1)\n self.log_lock.release()\n except:\n print '*Unable to write to log file*'", "def startLogFiles():\n #global techLog\n global userl\n _generic_startLogFiles(True)", "def _load_disk(self):\r\n pass", "def log_free_disk_space():\n cmd = 'df -h'\n p = Popen(cmd, shell=True, stdout=PIPE)\n res = p.communicate()\n if res[0]:\n res = res[0]\n else:\n res = res[1]\n logger.warning('Disk usage statisticks:')\n logger.warning(res)", "def test02StoreRefresh(self):\n s = utils.FastStore(max_size=5)\n keys = []\n for i in range(0, 5):\n keys.append(s.Put(i, i))\n\n # This should not raise because keys[0] should be refreshed each time its\n # gotten\n for i in range(0, 1000):\n s.Get(keys[0])\n s.Put(i, i)", "def log_time(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n start = time.time()\n result = func(*args, **kwargs)\n end = time.time()\n f = open(\"log_dev.txt\",'a',encoding=\"utf8\")\n time_res = end - start\n f.write(\"\\n\"+func.__name__+ \" time = \" + str(time_res))\n return result\n\n return wrapper", "def checkLogs():\n run(\"cat /etc/chariot/logs\")#not sure if this is cat-able", "def store(**kwargs):\r\n stored = AppLog(**kwargs)\r\n DBSession.add(stored)", "def test_compute_persistence(perfectModelEnsemble_initialized_control):\n perfectModelEnsemble_initialized_control._compute_persistence(metric=\"acc\")", "def collect_logs(self, log_dir, label=None, min_t=100, max_t=200):\n pass", "def test_backup_with_update_on_disk_of_snapshot_markers(self):\n version = RestConnection(self.backupset.backup_host).get_nodes_version()\n if version[:5] == \"6.5.0\":\n self.log.info(\"\\n\\n******* Due to issue in MB-36904, \\\n \\nthis test will be skipped in 6.5.0 ********\\n\")\n return\n gen1 = BlobGenerator(\"ent-backup1\", \"ent-backup-\", self.value_size, end=100000)\n gen2 = BlobGenerator(\"ent-backup2\", \"ent-backup-\", self.value_size, end=100000)\n gen3 = BlobGenerator(\"ent-backup3\", \"ent-backup-\", self.value_size, end=100000)\n rest_conn = RestConnection(self.backupset.cluster_host)\n rest_conn.create_bucket(bucket=\"bucket0\", ramQuotaMB=1024)\n self.buckets = rest_conn.get_buckets()\n authentication = \"-u Administrator -p password\"\n\n self._load_all_buckets(self.master, gen1, \"create\", 0)\n self.log.info(\"Stop persistent\")\n cluster_nodes = rest_conn.get_nodes()\n clusters = copy.deepcopy(cluster_nodes)\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n for node in clusters:\n shell.execute_command(\"%scbepctl%s %s:11210 -b %s stop %s\" % \\\n (self.cli_command_location,\n self.cmd_ext,\n node.ip,\n \"bucket0\",\n authentication))\n shell.disconnect()\n self.log.info(\"Load 2nd batch docs\")\n self._load_all_buckets(self.master, gen2, \"create\", 0)\n self.log.info(\"Run full backup with cbbackupwrapper\")\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n backup_dir = 
self.tmp_path + \"backup\" + self.master.ip\n shell.execute_command(\"rm -rf %s\" % backup_dir)\n shell.execute_command(\"mkdir %s\" % backup_dir)\n shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m full %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n self.log.info(\"Load 3rd batch docs\")\n self._load_all_buckets(self.master, gen3, \"create\", 0)\n self.log.info(\"Run diff backup with cbbackupwrapper\")\n output, _ = shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m diff %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n\n if output and \"SUCCESSFULLY COMPLETED\" not in output[1]:\n self.fail(\"Failed to backup as the fix in MB-25727\")\n shell.disconnect()", "def test_errors_on_log(self):\n mb = self.maria_backup\n self.assertFalse(mb.errors_on_log()) # at the moment, xtrabackup execution does not generate disk logs", "def add_log(self, log):\n log = str(datetime.datetime.now()) + \": \"+log+\"\\n\"\n print(log)\n self.logs.append(log)\n if len(self.logs) > 10:\n self.append_to_logfile()", "def log_scalar(name, value, step, autolog):\n if not autolog:\n mlflow.log_metric(name, value)", "def recordLogsToList(log):\n print log\n# global LOGLIST\n LOGLIST.append(log)", "def on_L1(self):\r\n self.log()", "def on_L2(self):\r\n self.log()", "def log_all(self):\n self.save_raw()\n self.log()", "def logs_directory(self):", "def test_log_links(self):\n self.host = synthetic_host(\"myserver-with-nids\", [Nid.Nid(\"192.168.0.1\", \"tcp\", 0)])\n self.create_simple_filesystem(self.host)\n fake_log_message(\"192.168.0.1@tcp testfs-MDT0000\")\n response = self.api_client.get(\"/api/log/\")\n (event,) = self.deserialize(response)[\"objects\"]\n self.assertEqual(len(event[\"substitutions\"]), 2)\n self.host.state = \"removed\"\n self.host.save()\n self.mdt.not_deleted = False\n self.mdt.save()\n response = self.api_client.get(\"/api/log/\")\n (event,) = self.deserialize(response)[\"objects\"]\n self.assertEqual(len(event[\"substitutions\"]), 0)", "def log(string):\n\n print string\n\n# data and time\n dt = datetime.now().strftime(\"%b %d %H:%M:%S\")\n\n# check if log file exist / if not create it\n check_logf = os.path.isfile(logfile)\n if check_logf == False:\n os.system(\"touch %s\" % (logfile))\n firstlog = \"%s %s jadm: jadm log file was created!\" % (dt, os.uname()[1])\n os.system(\"echo '%s' > %s\" % (firstlog, logfile))\n\n# applay string to log file\n string = \"%s %s jadm: %s\" % (dt, os.uname()[1], string)\n os.system(\"echo '%s' >> %s\" % (string, logfile))", "def test_data_store_local_index(self, tcex: TcEx):\n tcex.api.tc.v2.datastore('local', self.data_type)", "def createLogicalVolume(self, vg, filesystem, name, mountpoint, size):\n lv = {}\n lv['command'] = 'create:logvol'\n lv['vg'] = vg\n lv['fs'] = filesystem\n lv['size'] = size - EXTENT_SIZE + 1\n lv['name'] = name\n lv['mountPoint'] = mountpoint\n lv['format'] = 'yes'\n\n return lv", "def disk_log_scheme():\r\n if LogOptions._DISK_LOG_SCHEME is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_SCHEME", "def test_cbbackupmgr_collect_logs(self):\n if \"5.5\" > self.cb_version[:3]:\n self.fail(\"This test is only for cb version 5.5 and later. 
\")\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster()\n self._collect_logs()", "def log_runtime(label, mean_time, std, instances):\n pass", "def do_disk_event(client, args):\n args.type = 'disk'\n do_event_show(client, args)", "def show_bigdl_info_logs(bigdl_type=\"float\"):\n callBigDlFunc(bigdl_type, \"showBigDlInfoLogs\")", "def logs(self, **options):\n cmd = self.get_scalingo_prefix_cmd()\n cmd += \" logs -f\"\n for name, val in options.items():\n if len(name) == 1:\n cmd += f\" -{name}\"\n else:\n cmd += f\" --{name}\"\n if val:\n cmd += f\" {val}\"\n if self.env_name == \"local\":\n raise ValueError(\"No logs on local environment\")\n else:\n asyncio.run(self.async_run(cmd))", "def store_data(batch_df: DataFrame, output_delta_lake_path):\n batch_df.select(col(\"MeterId\"),\n col(\"SupplierId\"),\n col(\"Measurement\"),\n col(\"ObservationTime\")) \\\n .repartition(\"SupplierId\") \\\n .write \\\n .partitionBy(\"SupplierId\") \\\n .format(\"delta\") \\\n .mode(\"append\") \\\n .save(output_delta_lake_path)", "def logentry(jobid, label, typ, content=None, path=None):\n ud = str(uuid.uuid4())\n db.logs.save({\"uuid\":ud, \"jobid\":jobid, \"label\":label, \"type\":typ, \"content\":content, \"date\":tstamp(), \"containerurl\":path})", "def dump_to_log(self):\n # self._send_request(\"/dumpToLog\")\n pass", "def load(lb_id, backend='memory'):\n return b_api.fetch(backend).logbook_get(lb_id)", "def store(status, message, **kwargs):\r\n LogRecord(status, message, **kwargs)", "def log_mounts_info(mounts):\n if isinstance(mounts, str):\n mounts = [mounts]\n\n g.log.info(\"Start logging mounts information:\")\n for mount_obj in mounts:\n g.log.info(\"Information of mount %s:%s\", mount_obj.client_system,\n mount_obj.mountpoint)\n # Mount Info\n g.log.info(\"Look For Mountpoint:\\n\")\n cmd = \"mount | grep %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Disk Space Usage\n g.log.info(\"Disk Space Usage Of Mountpoint:\\n\")\n cmd = \"df -h %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Long list the mountpoint\n g.log.info(\"List Mountpoint Entries:\\n\")\n cmd = \"ls -ld %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Stat mountpoint\n g.log.info(\"Mountpoint Status:\\n\")\n cmd = \"stat %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)", "def putlog(self,s):\n if not self.logqueue == None:\n# print s\n self.logqueue.put(\"Spectrum: (\"+time.ctime()+\"):\\n\"+s)", "def set_size_based_rotating_log(log_path=LOGGER_PATH, log_name=LOGGER_FILENAME):\n\n # Checks if the path exists otherwise tries to create it\n if not os.path.exists(log_path):\n try:\n os.makedirs(log_path)\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n print(exc_type, exc_value, exc_traceback)\n\n # Sets the format and level of logging records\n format = '%(asctime)s - %(levelname)s - [%(pathname)s:%(lineno)s - %(funcName)10s() ] - %(message)s'\n formatter = Formatter(format)\n level=logging.DEBUG\n\n # Does basic configuration for the logging system\n logging.basicConfig(format=format, level=level)\n\n # Instantiates a logger object\n logger = logging.getLogger(\"\")\n\n handler = logging.handlers.RotatingFileHandler(filename=log_path+'/'+log_name,\n maxBytes=LOGGER_MAX_SIZE,\n backupCount=LOGGER_MAX_FILES)\n 
handler.setFormatter(formatter)\n handler.rotator = rotator\n handler.namer = namer\n logger.addHandler(handler)\n return logger", "def test_logging_log_rotate_for_mysql(\n self,\n admin_node,\n k8s_actions,\n show_step,\n os_deployed):\n logger.info('Log rotate for mysql')\n log_path = '/var/log/ccp/mysql/'\n log_file = 'mysql.log'\n\n show_step(1)\n # get cron pod\n cron_pod = [pod for pod in k8s_actions.api.pods.list(\n namespace=ext.Namespace.BASE_NAMESPACE)\n if 'cron-' in pod.name][0]\n # clean files\n utils.rm_files(admin_node, cron_pod, log_path + log_file + '*')\n\n show_step(2)\n for day in range(0, 8):\n utils.create_file(admin_node, cron_pod, log_path + log_file, 110)\n utils.run_daily_cron(admin_node, cron_pod, 'logrotate')\n sleep(5)\n\n show_step(3)\n log_files = utils.list_files(\n admin_node, cron_pod, log_path, log_file + '*')\n assert len(log_files) == 7,\\\n \"Count of log files after rotation is wrong. \" \\\n \"Expected {} Actual {}\".format(log_files, 7)", "def log(cmdDict, kind, error=None):\n\n cmdDict['timestamp'] = str(int(time.time() * 1000))\n cmdDict['logType'] = kind\n cmdDict['_id'] = cmdDict['transactionNumber'] + cmdDict['user'] + cmdDict['command'] + cmdDict['timestamp'] + cmdDict['logType']\n\n if (error != None):\n cmdDict['errorMessage'] = error\n\n try:\n Database.insert(TRANSACT_COLLECT, cmdDict)\n\n except pymongo.errors.PyMongoError as err:\n print(f\"ERROR! Could not log command into database. Failed with: {err}\")", "def upload_logs():\n return {\n 'page': 'upload_logs',\n 'raw_logs': '',\n }", "def logMemoryStats():\n class MemoryStatusEx(ctypes.Structure):\n \"\"\" MEMORYSTATUSEX \"\"\"\n kaFields = [\n ( 'dwLength', ctypes.c_ulong ),\n ( 'dwMemoryLoad', ctypes.c_ulong ),\n ( 'ullTotalPhys', ctypes.c_ulonglong ),\n ( 'ullAvailPhys', ctypes.c_ulonglong ),\n ( 'ullTotalPageFile', ctypes.c_ulonglong ),\n ( 'ullAvailPageFile', ctypes.c_ulonglong ),\n ( 'ullTotalVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailExtendedVirtual', ctypes.c_ulonglong ),\n ];\n _fields_ = kaFields; # pylint: disable=invalid-name\n\n def __init__(self):\n super(MemoryStatusEx, self).__init__();\n self.dwLength = ctypes.sizeof(self);\n\n try:\n oStats = MemoryStatusEx();\n ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(oStats));\n except:\n reporter.logXcpt();\n return False;\n\n reporter.log('Memory statistics:');\n for sField, _ in MemoryStatusEx.kaFields:\n reporter.log(' %32s: %s' % (sField, getattr(oStats, sField)));\n return True;", "def get_dismount_mount_record(r, start_time):\n retv = {'state': 'M',\n 'node': r['node'],\n 'volume': r['volume'],\n 'type': r['type'],\n 'logname': r['logname'],\n 'start': start_time,\n 'finish': timeutil.wind_time(start_time, seconds=20, backward=False),\n 'storage_group': 'PROBE_UTILITY',\n 'reads': 0,\n 'writes':0\n }\n return retv", "def azure_fetch_and_push_to_influx(token, azure_data_store, file_path, tag_map, influx_settings):\n df = read_df_from_azure(azure_data_store, file_path, token)\n (host, port, user, password, db_name, batch_size) = influx_settings\n #df=df.resample('1Min').mean()\n write_to_influx(df, tag_map, host, port, user, password, db_name, batch_size)", "def test_data_store_local_new_index(tcex: TcEx):\n data = {'one': 1}\n key = str(uuid.uuid4())\n rid = 'one'\n\n ds = tcex.api.tc.v2.datastore('local', key)\n\n results = ds.add(rid=rid, data=data)\n if results is None:\n assert False, 'datastore add returned None'\n assert results.get('_id') == rid\n assert 
results.get('_shards', {}).get('successful') == 1", "def test_metering_database(self):\n self.with_deletion = self.input.param(\"delete\", False)\n self.db_name = \"%s-testmetering\" % self.db_name\n # validate initial throughput is 5000/3 = 1666\n for bucket in self.cluster.buckets:\n print(bucket.servers)\n self.assertEqual(self.bucket_util.get_throttle_limit(bucket),\n self.bucket_throttling_limit)\n\n # validate create, update, delete stat\n for op_type in [\"create\", \"update\"]:\n if op_type == \"create\":\n self.load_data(create_start=0, create_end=self.num_items, create_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if op_type == \"update\":\n self.load_data(update_start=0, update_end=self.num_items, update_perc=100, mutated=1)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if self.with_deletion:\n self.log.info(\"performing delete operation\")\n self.load_data(delete_start=0, delete_end=self.num_items, delete_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)", "def on_L3(self):\r\n self.log()", "def send_logs(self):\n for i in range(30):\n with open('{}-{}.log'.format(self._log_file_path, i), 'a') as log_file:\n for _ in range(self._log_rate):\n log_file.write(self._log_record + '\\n')", "def fetch_data_logger(fn):\n @functools.wraps(fn)\n def wrapper():\n\n data = fn()\n\n create_or_update_log(data, LOG_TYPES['D'])\n\n return data\n return wrapper", "def list_log_files(fs, devices, start_times, verbose=True, passwords={}):\n import canedge_browser\n\n log_files = []\n\n if len(start_times):\n for idx, device in enumerate(devices):\n start = start_times[idx]\n log_files_device = canedge_browser.get_log_files(fs, [device], start_date=start, passwords=passwords)\n log_files.extend(log_files_device)\n\n if verbose:\n print(f\"Found {len(log_files)} log files\\n\")\n\n return log_files", "def main():\n global tar_file_descr\n\n help_msg = 'Usage: log_collector.py <all | host1[,host2,host3...]>'\n hosts = []\n if len(sys.argv) == 2:\n if '-h' == sys.argv[1] or '--help' == sys.argv[1]:\n print(help_msg)\n sys.exit(0)\n elif 'all' == sys.argv[1]:\n # get logs from all hosts\n hosts = []\n host_objs = CLIENT.host_get_all()\n for host_obj in host_objs:\n hosts.append(host_obj.name)\n else:\n # get logs from specified hosts\n hostnames = sys.argv[1].split(',')\n for host in hostnames:\n if host not in hosts:\n hosts.append(host)\n else:\n print(help_msg)\n sys.exit(1)\n\n # open tar file for storing logs\n fd, tar_path = tempfile.mkstemp(prefix='kolla_support_logs_',\n suffix='.tgz')\n os.close(fd) # avoid fd leak\n\n with tarfile.open(tar_path, 'w:gz') as tar_file_descr:\n # clear out old logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n os.mkdir(LOGDIR)\n\n # gather logs from selected hosts\n try:\n for host in hosts:\n get_logs_from_host(host)\n\n # tar up all the container logs\n tar_file_descr.add(LOGDIR, arcname='container_logs')\n finally:\n # remove uncompressed logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n\n # gather dump output from kolla-cli\n dump_kolla_info()\n\n print('Log collection complete. 
Logs are at %s' % tar_path)", "def log_store(self) -> Optional[str]:\n return pulumi.get(self, \"log_store\")", "def __enable_log_streaming():\n # Enable BigQuery service for the project:\n run_command('gcloud services enable bigquery --project {}'.format(PROJECT_ID))\n\n # Create the BigQuery dataset to store the logs:\n # For details refer to https://cloud.google.com/bigquery/docs/datasets#bigquery-create-dataset-cli\n run_command('bq mk --data_location {} --description \\\"Cloud logging export.\\\" {}'\n .format(LOGS_LOCATION, LOGS_SINK_DATASET_ID), 'already exists')\n\n # Set up a log sink to the above-created BigQuery dataset:\n output_message = run_command('gcloud logging sinks create {} {} --project {} \\\n --log-filter=\\'resource.type=\\\"gcs_bucket\\\" OR resource.type=\\\"project\\\"\\''\n .format(LOGS_SINK_NAME, LOGS_SINK_DESTINATION, PROJECT_ID))\n\n # The service account that will be writing to BQ dataset is listed by the previous command.\n # After extracting the service account from the message, you need to give it BQ Writer role to that service account.\n __set_dataset_access( __find_service_account_in_message(output_message) )", "def get_storage_info(self, address: str, level: StorageLevel):", "def local(self, text):\n\t\tlogf = open(\"update_log.txt\", \"a\")\n\t\tdate = datetime.datetime.now()\n\t\tlogf.writelines(\"[\" + str(date) + \"] \" + text + \"\\n\")\n\t\tlogf.close()", "def log_it(logdata=None):\n with open(\"bloomhack.log\", \"a\") as fp:\n fp.write(logdata)\n return", "def node_storage(self, node):\n def listdir(path, only_dirs=False):\n ents = node.account.sftp_client.listdir(path)\n if not only_dirs:\n return ents\n paths = map(lambda fn: (fn, os.path.join(path, fn)), ents)\n return [p[0] for p in paths if node.account.isdir(p[1])]\n\n store = NodeStorage(RedpandaService.DATA_DIR)\n for ns in listdir(store.data_dir, True):\n if ns == '.coprocessor_offset_checkpoints':\n continue\n ns = store.add_namespace(ns, os.path.join(store.data_dir, ns))\n for topic in listdir(ns.path):\n topic = ns.add_topic(topic, os.path.join(ns.path, topic))\n for num in listdir(topic.path):\n partition = topic.add_partition(\n num, node, os.path.join(topic.path, num))\n partition.add_files(listdir(partition.path))\n return store", "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))", "def snapshot(snapshot_type, result_q, time_delta):", "def _log_process(ip_or_url,\n username,\n password,\n database,\n series_name,\n port=8086,\n polling_interval=1,\n retention_duration=MEASUREMENTS_RETENTION_DURATION,\n **tags):\n logger = _logger()\n logger.info('Logging GPU to Database {}'.format(ip_or_url))\n\n kwargs = {'series_name': series_name,\n 'polling_interval': polling_interval,\n 'retention_duration': retention_duration}\n kwargs.update(tags)\n p = Process(target=_start_logger_process,\n args=(ip_or_url,\n port,\n username,\n password,\n database),\n kwargs=kwargs)\n p.start()\n yield p\n p.terminate()\n p.join()", "def test_start_new_log(self):\n old_log_file_name = self.device.log_file_name\n 
self.device.start_new_log(log_name_prefix=self.get_log_suffix())\n self.assertNotEqual(\n old_log_file_name, self.device.log_file_name,\n f\"Expected log file name to change from {old_log_file_name}\")\n self.assertTrue(\n os.path.exists(old_log_file_name),\n f\"Expected old log file name {old_log_file_name} to exist\")\n self.assertTrue(\n os.path.exists(self.device.log_file_name),\n f\"Expected new log file name {self.device.log_file_name} to exist\")" ]
[ "0.6248957", "0.6004453", "0.53603053", "0.53381205", "0.5311462", "0.5291663", "0.5282635", "0.5279799", "0.52787983", "0.5260151", "0.52150124", "0.5205798", "0.5137791", "0.5104536", "0.5094829", "0.5094829", "0.5089645", "0.5080881", "0.5069531", "0.5067983", "0.5049315", "0.5047344", "0.50428", "0.50336874", "0.5028792", "0.5028792", "0.50232273", "0.5017229", "0.50001866", "0.4975056", "0.49695352", "0.49651346", "0.4959732", "0.49485096", "0.49201605", "0.491775", "0.4906063", "0.4901353", "0.48973086", "0.48958024", "0.48914644", "0.48892727", "0.48823112", "0.48804468", "0.4877903", "0.48676512", "0.48655146", "0.48638356", "0.48596066", "0.48569125", "0.48556426", "0.48419136", "0.48412684", "0.48380032", "0.48372665", "0.48348084", "0.48249245", "0.48086706", "0.4807052", "0.4804408", "0.48027894", "0.47964686", "0.47765866", "0.4768357", "0.475868", "0.47576597", "0.474328", "0.4735613", "0.47350103", "0.47318107", "0.47237858", "0.47210956", "0.47131303", "0.47130087", "0.47083464", "0.47016108", "0.47003374", "0.4697505", "0.46934873", "0.46901193", "0.4685906", "0.46849328", "0.46755195", "0.46703953", "0.46671495", "0.4667117", "0.46545303", "0.46534565", "0.46448874", "0.46371803", "0.46359396", "0.4634748", "0.4631148", "0.46283913", "0.4627094", "0.46101946", "0.45972055", "0.45956996", "0.45937267", "0.45925382", "0.45902616" ]
0.0
-1
This operation is applicable only to general-purpose local-disk and dedicated local-disk instances. You can call this operation up to 30 times per minute. To call this operation at a higher frequency, use a Logstore. For more information, see [Manage a Logstore](~~48990~~).
async def modify_audit_policy_async( self, request: dds_20151201_models.ModifyAuditPolicyRequest, ) -> dds_20151201_models.ModifyAuditPolicyResponse: runtime = util_models.RuntimeOptions() return await self.modify_audit_policy_with_options_async(request, runtime)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def monitorStore():\n # commented to use psutil system info system_info = systeminfo.get_all_info()\n\n system_info = nodeinfo.node_all()\n system_info ['monitored_timestamp'] = config.get_current_system_timestamp()\n\n # Attach sliver info to system info\n system_info.update(sliverinfo.collectAllDataAPI())\n\n s = shelve.open('log_shelf.db', writeback = True)\n\n while(1):\n try:\n try:\n if s.has_key('current_seq_number'):\n #Update current sequence number\n s['current_seq_number']+= 1\n current_seq = s['current_seq_number']\n else:\n current_seq = 1\n s['current_seq_number']= current_seq\n\n print(\"writing to file: \" + str(current_seq))\n\n # print(\"writing to file\" + str(system_info))\n s[str(current_seq)]= system_info\n\n\n finally:\n s.close()\n break\n\n except OSError:\n # In some research devices, the underlying dbm has a bug which needs to be handled explicitly\n print(\"Exception caught while handling shelve file!! OS Error: file not found. Trying again in 1 second\")\n time.sleep(1)\n continue\n\n delete_seen_entries()", "def log(msg):\n\n print('datastore: %s' % msg)", "def disable_disk_logging():\r\n app.set_option(_DISK_LOG_LEVEL_OPTION, LogOptions._LOG_LEVEL_NONE_KEY, force=True)", "def store_stream(data: DataStream):\n if data:\n try:\n c1 = datetime.now()\n CC.save_datastream(data,\"datastream\")\n e1 = datetime.now()\n CC.save_datastream_to_influxdb(data)\n i1 = datetime.now()\n print(\"Cassandra Time: \", e1-c1, \" Influx Time: \",i1-e1, \" Batch size: \",len(data.data))\n except:\n cc_log()", "def test_Drive_log_count(opencl_env: cldrive_env.OpenCLEnvironment):\n logs = opencl_kernel_driver.Drive(\n \"\"\"\nkernel void A(global int* a, global int* b) {\n a[get_global_id(0)] += b[get_global_id(0)];\n}\n\"\"\",\n 16,\n 16,\n opencl_env,\n 5,\n )\n assert len(logs) == 5", "def disk():\n run(env.disk_usage_command % env)", "def record_used(kind, hash):\n if os.path.exists(LOG_FILEPATH):\n log = open(os.path.join(ROOT, 'used'), 'a')\n else:\n log = open(os.path.join(ROOT, 'used'), 'w+')\n\n log.writelines([\"%s...%s\\n\" % (kind, hash)])", "def check_oplog(con, warning, critical, perf_data):\n warning = warning or 24 \n critical = critical or 4\n try:\n db = con.local\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) \n if (db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) != None) :\n oplog = \"oplog.rs\";\n else :\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"})\n if (db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"}) != None) :\n oplog = \"oplog.$main\";\n else :\n message = \"neither master/slave nor replica set replication detected\";\n return check_levels(None,warning,critical,message)\n\n try:\n set_read_preference(con.admin)\n data=con.local.command(pymongo.son_manipulator.SON([('collstats',oplog)]))\n except:\n data = con.admin.command(son.SON([('collstats',oplog)]))\n\n ol_size=data['size']\n ol_storage_size=data['storageSize']\n ol_used_storage=int(float(ol_size)/ol_storage_size*100+1)\n ol=con.local[oplog]\n firstc = ol.find().sort(\"$natural\",pymongo.ASCENDING).limit(1)[0]['ts']\n lastc = ol.find().sort(\"$natural\",pymongo.DESCENDING).limit(1)[0]['ts']\n time_in_oplog= (lastc.as_datetime()-firstc.as_datetime())\n message=\"Oplog saves \"+ str(time_in_oplog) + \" %d%% used\" %ol_used_storage \n try: #work starting from python2.7\n hours_in_oplog= time_in_oplog.total_seconds()/60/60\n except:\n hours_in_oplog= float(time_in_oplog.seconds + time_in_oplog.days * 24 * 3600)/60/60\n 
approx_level=hours_in_oplog*100/ol_used_storage\n message+=performance_data(perf_data,[(\"%.2f\" % hours_in_oplog,'oplog_time',warning,critical),(\"%.2f \" % approx_level, 'oplog_time_100_percent_used')])\n return check_levels(-approx_level,-warning,-critical,message)\n\n except Exception, e:\n return exit_with_general_critical(e)", "def syslog_local_file(handle, admin_state, name, severity=\"emergencies\",\n size=\"40000\"):\n\n from ucsmsdk.mometa.comm.CommSyslogFile import CommSyslogFile\n\n mo = CommSyslogFile(parent_mo_or_dn=\"sys/svc-ext/syslog\", size=size,\n admin_state=admin_state,\n name=name,\n severity=severity)\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n return mo", "def disk_log_level():\r\n if LogOptions._DISK_LOG_LEVEL is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_LEVEL", "def get_full_log(self):\n return self._get_log('full')", "def _record_storage_event(metric, value=0):\n command_name = properties.VALUES.metrics.command_name.Get()\n metrics.CustomKeyValue(command_name, 'Storage-' + metric, value)", "def get_logdb_quota():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<show><system><logdb-quota></logdb-quota></system></show>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def test_memory_pressure(self):\n self.execute_query(self.query)\n # This triggers a full GC as of openjdk 1.8.\n call([\"jmap\", \"-histo:live\", str(self.cluster.catalogd.get_pid())])\n # Sleep for logbufsecs=5 seconds to wait for the log to be flushed. Wait 5 more\n # seconds to reduce flakiness.\n time.sleep(10)\n assert self.metadata_cache_string not in self._get_catalog_object()", "def getLogs():", "def getLogs():", "def InsertLog():", "def GetLogs(self):\n raise NotImplementedError()", "def logmem(cls, tag):\n\n cls.logger.info(\"----%s----\", str(tag))\n mem = psutil.virtual_memory()\n cls.logger.info(\"total:%s M\", mem.total / 1024 / 1024)\n cls.logger.info(\"available:%s M\", mem.available / 1024 / 1024)\n cls.logger.info(\"used:%s M\", mem.used / 1024 / 1024)\n cls.logger.info(\"free:%s M\", mem.free / 1024 / 1024)\n cls.logger.info(\"active:%s M\", mem.active / 1024 / 1024)\n cls.logger.info(\"inactive:%s M\", mem.inactive / 1024 / 1024)\n cls.logger.info(\"buffers:%s M\", mem.buffers / 1024 / 1024)\n cls.logger.info(\"cached:%s M\", mem.cached / 1024 / 1024)\n cls.logger.info(\"shared:%s M\", mem.shared / 1024 / 1024)", "def upload_crashes(self, name, directory):\n logging.info('Not uploading crashes because no Filestore.')", "def logs(lines=50):\n run(f'journalctl --lines {lines} --unit addok --follow')", "def log(self, message):\n try:\n stat = os.stat(self.logpath)\n if stat.st_size >= 1000000:\n os.rename(self.logpath, self.logpath + '.1')\n except:\n pass\n logfile = open(self.logpath, 'a+')\n logfile.write(message + \"\\n\")\n logfile.close()", "def logs(name, namespace, region, start_time, end_time, offset, failed, tail):\n\n start, end = _align_time(start_time, end_time, offset, tail)\n client = ScfLogClient(name, namespace, region, failed)\n client.fetch_log(start, end, tail)", "def objectLogger(params):\n mode = params['mode']\n maxlen = params['maxlen']\n file_format = params['format']\n home_dir = params['storage_dir']\n server = Listener((params['ip'], int(params['port'])))\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n while True:\n conn = server.accept()\n while True:\n try:\n data = conn.recv()\n except 
EOFError:\n break\n if data:\n storage.append(data)\n else:\n storage.sync(id_generator())\n storage = PersistentDeque(\n home_dir=home_dir,\n maxlen=maxlen,\n file_format=file_format,\n mode=mode,\n )\n conn.close()", "def _load_disk(self):", "def _load_disk(self):", "def redire_spark_logs(bigdl_type=\"float\", log_path=os.getcwd() + \"/bigdl.log\"):\n callBigDlFunc(bigdl_type, \"redirectSparkLogs\", log_path)", "def get_device_log(self, client):\r\n file_path = client.getDeviceLog()\r\n logging.info(str(time.asctime(time.localtime())) + \" Device Logs collected !! \" + file_path)\r\n #self.logger.info(\"Device logs collected at %s\" % file_path)\r\n return file_path", "def logs(self, container: Container) -> str:", "def log_store(self) -> str:\n return pulumi.get(self, \"log_store\")", "def on_sync(self):\r\n self.log()", "def collect_storage_metrics(sys):\n try:\n session = get_session()\n client = InfluxDBClient(host=INFLUXDB_HOSTNAME, port=INFLUXDB_PORT, database=INFLUXDB_DATABASE)\n\n sys_id = sys[\"id\"]\n sys_name = get_system_name(sys)\n\n json_body = list()\n\n # Get Drive statistics\n drive_stats_list = session.get((\"{}/{}/analysed-drive-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n drive_locations = get_drive_location(sys_id, session)\n if CMD.showDriveNames:\n for stats in drive_stats_list:\n location_send = drive_locations.get(stats[\"diskId\"])\n LOG.info((\"Tray{:02.0f}, Slot{:03.0f}\").format(location_send[0], location_send[1]))\n # Add Drive statistics to json body\n for stats in drive_stats_list:\n disk_location_info = drive_locations.get(stats[\"diskId\"])\n disk_item = dict(\n measurement = \"disks\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n sys_tray = (\"{:02.0f}\").format(disk_location_info[0]),\n sys_tray_slot = (\"{:03.0f}\").format(disk_location_info[1])\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in DRIVE_PARAMS\n )\n )\n if CMD.showDriveMetrics:\n LOG.info(\"Drive payload: %s\", disk_item)\n json_body.append(disk_item)\n\n # Get interface statistics\n interface_stats_list = session.get((\"{}/{}/analysed-interface-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showInterfaceNames:\n for stats in interface_stats_list:\n LOG.info(stats[\"interfaceId\"])\n # Add interface statistics to json body\n for stats in interface_stats_list:\n if_item = dict(\n measurement = \"interface\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n interface_id = stats[\"interfaceId\"],\n channel_type = stats[\"channelType\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in INTERFACE_PARAMS\n )\n )\n if CMD.showInterfaceMetrics:\n LOG.info(\"Interface payload: %s\", if_item)\n json_body.append(if_item)\n\n # Get System statistics\n system_stats_list = session.get((\"{}/{}/analysed-system-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n # Add System statistics to json body\n sys_item = dict(\n measurement = \"systems\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name\n ),\n fields = dict(\n (metric, system_stats_list.get(metric)) for metric in SYSTEM_PARAMS\n )\n )\n if CMD.showSystemMetrics:\n LOG.info(\"System payload: %s\", sys_item)\n json_body.append(sys_item)\n \n # Get Volume statistics\n volume_stats_list = session.get((\"{}/{}/analysed-volume-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showVolumeNames:\n for stats in volume_stats_list:\n LOG.info(stats[\"volumeName\"])\n # Add Volume statistics to json body\n for stats in volume_stats_list:\n 
vol_item = dict(\n measurement = \"volumes\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n vol_name = stats[\"volumeName\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in VOLUME_PARAMS\n )\n )\n if CMD.showVolumeMetrics:\n LOG.info(\"Volume payload: %s\", vol_item)\n json_body.append(vol_item)\n\n if not CMD.doNotPost:\n client.write_points(json_body, database=INFLUXDB_DATABASE, time_precision=\"s\")\n\n except RuntimeError:\n LOG.error((\"Error when attempting to post statistics for {}/{}\").format(sys[\"name\"], sys[\"id\"]))", "def reset_used():\n with open(LOG_FILEPATH, 'w+') as logfile:\n pass", "def docker_logs():\n local('docker logs {} -f'.format(project_name))", "def test_slf_notastring():\n oldlogfile = get_logfile()\n start_logfile(1.0)\n start_logfile(True)\n set_logfile(oldlogfile)", "def set_disk_log_level(log_level):\r\n LogOptions._DISK_LOG_SCHEME, LogOptions._DISK_LOG_LEVEL = (\r\n LogOptions._parse_loglevel(log_level, scheme='google'))", "def test_slf_basic():\n oldlogfile = get_logfile()\n with tempfile.TemporaryDirectory() as tmp:\n logfile = os.path.join(tmp, 'log.txt')\n assert ~os.path.exists(logfile)\n start_logfile(logfile)\n assert os.path.exists(logfile)\n set_logfile(oldlogfile)", "def _performance_log(func):\n\n def wrapper(*arg):\n \"\"\" wrapper \"\"\"\n\n start = datetime.datetime.now()\n\n # Code execution\n res = func(*arg)\n\n if _log_performance:\n usage = resource.getrusage(resource.RUSAGE_SELF)\n memory_process = (usage[2])/1000\n\n delta = datetime.datetime.now() - start\n delta_milliseconds = int(delta.total_seconds() * 1000)\n\n _logger.info(\"PERFORMANCE - {0} - milliseconds |{1:>8,}| - memory MB |{2:>8,}|\"\n .format(func.__name__,\n delta_milliseconds,\n memory_process))\n\n return res\n\n return wrapper", "def log(length, file):\n\n if user_init.check_pre_init() and user_utility.check_drive_init() == 'True':\n \n data = user_utility.read_log(length, file)\n\n for log in data:\n print(log)\n\n\n else:\n user_utility.print_error(\"Sink folder not Found! 
Initialise folder first or reset your configuration!\")", "def logprllwrite(self):\n sql = '''select to_char(time_waited, 'FM99999999999999990') retvalue \n from v$system_event se, v$event_name en where se.event(+) \n = en.name and en.name = 'log files parallel write' '''\n self.cur.execute(sql)\n res = self.cur.fetchall()\n for i in res:\n print(i[0])", "def init_log():\n log_op = LogOp()\n log_op.config_log(logging.DEBUG)", "def logManager(self):\n time.sleep(0.1)\n while True:\n try:\n time.sleep(0.2)\n data = self.logQ.get(block=False)\n except Queue.Empty:\n pass\n else:\n try:\n self.log_lock.acquire() \n self.log_file.logEntry(data)\n time.sleep(0.1)\n self.log_lock.release()\n except:\n print '*Unable to write to log file*'", "def startLogFiles():\n #global techLog\n global userl\n _generic_startLogFiles(True)", "def _load_disk(self):\r\n pass", "def log_free_disk_space():\n cmd = 'df -h'\n p = Popen(cmd, shell=True, stdout=PIPE)\n res = p.communicate()\n if res[0]:\n res = res[0]\n else:\n res = res[1]\n logger.warning('Disk usage statisticks:')\n logger.warning(res)", "def test02StoreRefresh(self):\n s = utils.FastStore(max_size=5)\n keys = []\n for i in range(0, 5):\n keys.append(s.Put(i, i))\n\n # This should not raise because keys[0] should be refreshed each time its\n # gotten\n for i in range(0, 1000):\n s.Get(keys[0])\n s.Put(i, i)", "def log_time(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n start = time.time()\n result = func(*args, **kwargs)\n end = time.time()\n f = open(\"log_dev.txt\",'a',encoding=\"utf8\")\n time_res = end - start\n f.write(\"\\n\"+func.__name__+ \" time = \" + str(time_res))\n return result\n\n return wrapper", "def checkLogs():\n run(\"cat /etc/chariot/logs\")#not sure if this is cat-able", "def store(**kwargs):\r\n stored = AppLog(**kwargs)\r\n DBSession.add(stored)", "def test_compute_persistence(perfectModelEnsemble_initialized_control):\n perfectModelEnsemble_initialized_control._compute_persistence(metric=\"acc\")", "def collect_logs(self, log_dir, label=None, min_t=100, max_t=200):\n pass", "def test_backup_with_update_on_disk_of_snapshot_markers(self):\n version = RestConnection(self.backupset.backup_host).get_nodes_version()\n if version[:5] == \"6.5.0\":\n self.log.info(\"\\n\\n******* Due to issue in MB-36904, \\\n \\nthis test will be skipped in 6.5.0 ********\\n\")\n return\n gen1 = BlobGenerator(\"ent-backup1\", \"ent-backup-\", self.value_size, end=100000)\n gen2 = BlobGenerator(\"ent-backup2\", \"ent-backup-\", self.value_size, end=100000)\n gen3 = BlobGenerator(\"ent-backup3\", \"ent-backup-\", self.value_size, end=100000)\n rest_conn = RestConnection(self.backupset.cluster_host)\n rest_conn.create_bucket(bucket=\"bucket0\", ramQuotaMB=1024)\n self.buckets = rest_conn.get_buckets()\n authentication = \"-u Administrator -p password\"\n\n self._load_all_buckets(self.master, gen1, \"create\", 0)\n self.log.info(\"Stop persistent\")\n cluster_nodes = rest_conn.get_nodes()\n clusters = copy.deepcopy(cluster_nodes)\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n for node in clusters:\n shell.execute_command(\"%scbepctl%s %s:11210 -b %s stop %s\" % \\\n (self.cli_command_location,\n self.cmd_ext,\n node.ip,\n \"bucket0\",\n authentication))\n shell.disconnect()\n self.log.info(\"Load 2nd batch docs\")\n self._load_all_buckets(self.master, gen2, \"create\", 0)\n self.log.info(\"Run full backup with cbbackupwrapper\")\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n backup_dir = 
self.tmp_path + \"backup\" + self.master.ip\n shell.execute_command(\"rm -rf %s\" % backup_dir)\n shell.execute_command(\"mkdir %s\" % backup_dir)\n shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m full %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n self.log.info(\"Load 3rd batch docs\")\n self._load_all_buckets(self.master, gen3, \"create\", 0)\n self.log.info(\"Run diff backup with cbbackupwrapper\")\n output, _ = shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m diff %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n\n if output and \"SUCCESSFULLY COMPLETED\" not in output[1]:\n self.fail(\"Failed to backup as the fix in MB-25727\")\n shell.disconnect()", "def test_errors_on_log(self):\n mb = self.maria_backup\n self.assertFalse(mb.errors_on_log()) # at the moment, xtrabackup execution does not generate disk logs", "def add_log(self, log):\n log = str(datetime.datetime.now()) + \": \"+log+\"\\n\"\n print(log)\n self.logs.append(log)\n if len(self.logs) > 10:\n self.append_to_logfile()", "def log_scalar(name, value, step, autolog):\n if not autolog:\n mlflow.log_metric(name, value)", "def recordLogsToList(log):\n print log\n# global LOGLIST\n LOGLIST.append(log)", "def on_L1(self):\r\n self.log()", "def on_L2(self):\r\n self.log()", "def log_all(self):\n self.save_raw()\n self.log()", "def logs_directory(self):", "def test_log_links(self):\n self.host = synthetic_host(\"myserver-with-nids\", [Nid.Nid(\"192.168.0.1\", \"tcp\", 0)])\n self.create_simple_filesystem(self.host)\n fake_log_message(\"192.168.0.1@tcp testfs-MDT0000\")\n response = self.api_client.get(\"/api/log/\")\n (event,) = self.deserialize(response)[\"objects\"]\n self.assertEqual(len(event[\"substitutions\"]), 2)\n self.host.state = \"removed\"\n self.host.save()\n self.mdt.not_deleted = False\n self.mdt.save()\n response = self.api_client.get(\"/api/log/\")\n (event,) = self.deserialize(response)[\"objects\"]\n self.assertEqual(len(event[\"substitutions\"]), 0)", "def log(string):\n\n print string\n\n# data and time\n dt = datetime.now().strftime(\"%b %d %H:%M:%S\")\n\n# check if log file exist / if not create it\n check_logf = os.path.isfile(logfile)\n if check_logf == False:\n os.system(\"touch %s\" % (logfile))\n firstlog = \"%s %s jadm: jadm log file was created!\" % (dt, os.uname()[1])\n os.system(\"echo '%s' > %s\" % (firstlog, logfile))\n\n# applay string to log file\n string = \"%s %s jadm: %s\" % (dt, os.uname()[1], string)\n os.system(\"echo '%s' >> %s\" % (string, logfile))", "def test_data_store_local_index(self, tcex: TcEx):\n tcex.api.tc.v2.datastore('local', self.data_type)", "def createLogicalVolume(self, vg, filesystem, name, mountpoint, size):\n lv = {}\n lv['command'] = 'create:logvol'\n lv['vg'] = vg\n lv['fs'] = filesystem\n lv['size'] = size - EXTENT_SIZE + 1\n lv['name'] = name\n lv['mountPoint'] = mountpoint\n lv['format'] = 'yes'\n\n return lv", "def disk_log_scheme():\r\n if LogOptions._DISK_LOG_SCHEME is None:\r\n LogOptions.set_disk_log_level(app.get_options().twitter_common_log_disk_log_level)\r\n return LogOptions._DISK_LOG_SCHEME", "def test_cbbackupmgr_collect_logs(self):\n if \"5.5\" > self.cb_version[:3]:\n self.fail(\"This test is only for cb version 5.5 and later. 
\")\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster()\n self._collect_logs()", "def log_runtime(label, mean_time, std, instances):\n pass", "def do_disk_event(client, args):\n args.type = 'disk'\n do_event_show(client, args)", "def show_bigdl_info_logs(bigdl_type=\"float\"):\n callBigDlFunc(bigdl_type, \"showBigDlInfoLogs\")", "def logs(self, **options):\n cmd = self.get_scalingo_prefix_cmd()\n cmd += \" logs -f\"\n for name, val in options.items():\n if len(name) == 1:\n cmd += f\" -{name}\"\n else:\n cmd += f\" --{name}\"\n if val:\n cmd += f\" {val}\"\n if self.env_name == \"local\":\n raise ValueError(\"No logs on local environment\")\n else:\n asyncio.run(self.async_run(cmd))", "def store_data(batch_df: DataFrame, output_delta_lake_path):\n batch_df.select(col(\"MeterId\"),\n col(\"SupplierId\"),\n col(\"Measurement\"),\n col(\"ObservationTime\")) \\\n .repartition(\"SupplierId\") \\\n .write \\\n .partitionBy(\"SupplierId\") \\\n .format(\"delta\") \\\n .mode(\"append\") \\\n .save(output_delta_lake_path)", "def logentry(jobid, label, typ, content=None, path=None):\n ud = str(uuid.uuid4())\n db.logs.save({\"uuid\":ud, \"jobid\":jobid, \"label\":label, \"type\":typ, \"content\":content, \"date\":tstamp(), \"containerurl\":path})", "def load(lb_id, backend='memory'):\n return b_api.fetch(backend).logbook_get(lb_id)", "def dump_to_log(self):\n # self._send_request(\"/dumpToLog\")\n pass", "def store(status, message, **kwargs):\r\n LogRecord(status, message, **kwargs)", "def log_mounts_info(mounts):\n if isinstance(mounts, str):\n mounts = [mounts]\n\n g.log.info(\"Start logging mounts information:\")\n for mount_obj in mounts:\n g.log.info(\"Information of mount %s:%s\", mount_obj.client_system,\n mount_obj.mountpoint)\n # Mount Info\n g.log.info(\"Look For Mountpoint:\\n\")\n cmd = \"mount | grep %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Disk Space Usage\n g.log.info(\"Disk Space Usage Of Mountpoint:\\n\")\n cmd = \"df -h %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Long list the mountpoint\n g.log.info(\"List Mountpoint Entries:\\n\")\n cmd = \"ls -ld %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)\n\n # Stat mountpoint\n g.log.info(\"Mountpoint Status:\\n\")\n cmd = \"stat %s\" % mount_obj.mountpoint\n _, _, _ = g.run(mount_obj.client_system, cmd)", "def putlog(self,s):\n if not self.logqueue == None:\n# print s\n self.logqueue.put(\"Spectrum: (\"+time.ctime()+\"):\\n\"+s)", "def set_size_based_rotating_log(log_path=LOGGER_PATH, log_name=LOGGER_FILENAME):\n\n # Checks if the path exists otherwise tries to create it\n if not os.path.exists(log_path):\n try:\n os.makedirs(log_path)\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n print(exc_type, exc_value, exc_traceback)\n\n # Sets the format and level of logging records\n format = '%(asctime)s - %(levelname)s - [%(pathname)s:%(lineno)s - %(funcName)10s() ] - %(message)s'\n formatter = Formatter(format)\n level=logging.DEBUG\n\n # Does basic configuration for the logging system\n logging.basicConfig(format=format, level=level)\n\n # Instantiates a logger object\n logger = logging.getLogger(\"\")\n\n handler = logging.handlers.RotatingFileHandler(filename=log_path+'/'+log_name,\n maxBytes=LOGGER_MAX_SIZE,\n backupCount=LOGGER_MAX_FILES)\n 
handler.setFormatter(formatter)\n handler.rotator = rotator\n handler.namer = namer\n logger.addHandler(handler)\n return logger", "def test_logging_log_rotate_for_mysql(\n self,\n admin_node,\n k8s_actions,\n show_step,\n os_deployed):\n logger.info('Log rotate for mysql')\n log_path = '/var/log/ccp/mysql/'\n log_file = 'mysql.log'\n\n show_step(1)\n # get cron pod\n cron_pod = [pod for pod in k8s_actions.api.pods.list(\n namespace=ext.Namespace.BASE_NAMESPACE)\n if 'cron-' in pod.name][0]\n # clean files\n utils.rm_files(admin_node, cron_pod, log_path + log_file + '*')\n\n show_step(2)\n for day in range(0, 8):\n utils.create_file(admin_node, cron_pod, log_path + log_file, 110)\n utils.run_daily_cron(admin_node, cron_pod, 'logrotate')\n sleep(5)\n\n show_step(3)\n log_files = utils.list_files(\n admin_node, cron_pod, log_path, log_file + '*')\n assert len(log_files) == 7,\\\n \"Count of log files after rotation is wrong. \" \\\n \"Expected {} Actual {}\".format(log_files, 7)", "def log(cmdDict, kind, error=None):\n\n cmdDict['timestamp'] = str(int(time.time() * 1000))\n cmdDict['logType'] = kind\n cmdDict['_id'] = cmdDict['transactionNumber'] + cmdDict['user'] + cmdDict['command'] + cmdDict['timestamp'] + cmdDict['logType']\n\n if (error != None):\n cmdDict['errorMessage'] = error\n\n try:\n Database.insert(TRANSACT_COLLECT, cmdDict)\n\n except pymongo.errors.PyMongoError as err:\n print(f\"ERROR! Could not log command into database. Failed with: {err}\")", "def upload_logs():\n return {\n 'page': 'upload_logs',\n 'raw_logs': '',\n }", "def logMemoryStats():\n class MemoryStatusEx(ctypes.Structure):\n \"\"\" MEMORYSTATUSEX \"\"\"\n kaFields = [\n ( 'dwLength', ctypes.c_ulong ),\n ( 'dwMemoryLoad', ctypes.c_ulong ),\n ( 'ullTotalPhys', ctypes.c_ulonglong ),\n ( 'ullAvailPhys', ctypes.c_ulonglong ),\n ( 'ullTotalPageFile', ctypes.c_ulonglong ),\n ( 'ullAvailPageFile', ctypes.c_ulonglong ),\n ( 'ullTotalVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailExtendedVirtual', ctypes.c_ulonglong ),\n ];\n _fields_ = kaFields; # pylint: disable=invalid-name\n\n def __init__(self):\n super(MemoryStatusEx, self).__init__();\n self.dwLength = ctypes.sizeof(self);\n\n try:\n oStats = MemoryStatusEx();\n ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(oStats));\n except:\n reporter.logXcpt();\n return False;\n\n reporter.log('Memory statistics:');\n for sField, _ in MemoryStatusEx.kaFields:\n reporter.log(' %32s: %s' % (sField, getattr(oStats, sField)));\n return True;", "def get_dismount_mount_record(r, start_time):\n retv = {'state': 'M',\n 'node': r['node'],\n 'volume': r['volume'],\n 'type': r['type'],\n 'logname': r['logname'],\n 'start': start_time,\n 'finish': timeutil.wind_time(start_time, seconds=20, backward=False),\n 'storage_group': 'PROBE_UTILITY',\n 'reads': 0,\n 'writes':0\n }\n return retv", "def azure_fetch_and_push_to_influx(token, azure_data_store, file_path, tag_map, influx_settings):\n df = read_df_from_azure(azure_data_store, file_path, token)\n (host, port, user, password, db_name, batch_size) = influx_settings\n #df=df.resample('1Min').mean()\n write_to_influx(df, tag_map, host, port, user, password, db_name, batch_size)", "def test_metering_database(self):\n self.with_deletion = self.input.param(\"delete\", False)\n self.db_name = \"%s-testmetering\" % self.db_name\n # validate initial throughput is 5000/3 = 1666\n for bucket in self.cluster.buckets:\n print(bucket.servers)\n 
self.assertEqual(self.bucket_util.get_throttle_limit(bucket),\n self.bucket_throttling_limit)\n\n # validate create, update, delete stat\n for op_type in [\"create\", \"update\"]:\n if op_type == \"create\":\n self.load_data(create_start=0, create_end=self.num_items, create_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if op_type == \"update\":\n self.load_data(update_start=0, update_end=self.num_items, update_perc=100, mutated=1)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if self.with_deletion:\n self.log.info(\"performing delete operation\")\n self.load_data(delete_start=0, delete_end=self.num_items, delete_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)", "def test_data_store_local_new_index(tcex: TcEx):\n data = {'one': 1}\n key = str(uuid.uuid4())\n rid = 'one'\n\n ds = tcex.api.tc.v2.datastore('local', key)\n\n results = ds.add(rid=rid, data=data)\n if results is None:\n assert False, 'datastore add returned None'\n assert results.get('_id') == rid\n assert results.get('_shards', {}).get('successful') == 1", "def send_logs(self):\n for i in range(30):\n with open('{}-{}.log'.format(self._log_file_path, i), 'a') as log_file:\n for _ in range(self._log_rate):\n log_file.write(self._log_record + '\\n')", "def on_L3(self):\r\n self.log()", "def fetch_data_logger(fn):\n @functools.wraps(fn)\n def wrapper():\n\n data = fn()\n\n create_or_update_log(data, LOG_TYPES['D'])\n\n return data\n return wrapper", "def list_log_files(fs, devices, start_times, verbose=True, passwords={}):\n import canedge_browser\n\n log_files = []\n\n if len(start_times):\n for idx, device in enumerate(devices):\n start = start_times[idx]\n log_files_device = canedge_browser.get_log_files(fs, [device], start_date=start, passwords=passwords)\n log_files.extend(log_files_device)\n\n if verbose:\n print(f\"Found {len(log_files)} log files\\n\")\n\n return log_files", "def main():\n global tar_file_descr\n\n help_msg = 'Usage: log_collector.py <all | host1[,host2,host3...]>'\n hosts = []\n if len(sys.argv) == 2:\n if '-h' == sys.argv[1] or '--help' == sys.argv[1]:\n print(help_msg)\n sys.exit(0)\n elif 'all' == sys.argv[1]:\n # get logs from all hosts\n hosts = []\n host_objs = CLIENT.host_get_all()\n for host_obj in host_objs:\n hosts.append(host_obj.name)\n else:\n # get logs from specified hosts\n hostnames = sys.argv[1].split(',')\n for host in hostnames:\n if host not in hosts:\n hosts.append(host)\n else:\n print(help_msg)\n sys.exit(1)\n\n # open tar file for storing logs\n fd, tar_path = tempfile.mkstemp(prefix='kolla_support_logs_',\n suffix='.tgz')\n os.close(fd) # avoid fd leak\n\n with tarfile.open(tar_path, 'w:gz') as tar_file_descr:\n # clear out old logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n os.mkdir(LOGDIR)\n\n # gather logs from selected hosts\n try:\n for host in hosts:\n get_logs_from_host(host)\n\n # tar up all the container logs\n tar_file_descr.add(LOGDIR, arcname='container_logs')\n finally:\n # remove uncompressed logs\n if os.path.exists(LOGDIR):\n shutil.rmtree(LOGDIR)\n\n # gather dump output from kolla-cli\n dump_kolla_info()\n\n print('Log collection complete. 
Logs are at %s' % tar_path)", "def log_store(self) -> Optional[str]:\n return pulumi.get(self, \"log_store\")", "def __enable_log_streaming():\n # Enable BigQuery service for the project:\n run_command('gcloud services enable bigquery --project {}'.format(PROJECT_ID))\n\n # Create the BigQuery dataset to store the logs:\n # For details refer to https://cloud.google.com/bigquery/docs/datasets#bigquery-create-dataset-cli\n run_command('bq mk --data_location {} --description \\\"Cloud logging export.\\\" {}'\n .format(LOGS_LOCATION, LOGS_SINK_DATASET_ID), 'already exists')\n\n # Set up a log sink to the above-created BigQuery dataset:\n output_message = run_command('gcloud logging sinks create {} {} --project {} \\\n --log-filter=\\'resource.type=\\\"gcs_bucket\\\" OR resource.type=\\\"project\\\"\\''\n .format(LOGS_SINK_NAME, LOGS_SINK_DESTINATION, PROJECT_ID))\n\n # The service account that will be writing to BQ dataset is listed by the previous command.\n # After extracting the service account from the message, you need to give it BQ Writer role to that service account.\n __set_dataset_access( __find_service_account_in_message(output_message) )", "def get_storage_info(self, address: str, level: StorageLevel):", "def local(self, text):\n\t\tlogf = open(\"update_log.txt\", \"a\")\n\t\tdate = datetime.datetime.now()\n\t\tlogf.writelines(\"[\" + str(date) + \"] \" + text + \"\\n\")\n\t\tlogf.close()", "def log_it(logdata=None):\n with open(\"bloomhack.log\", \"a\") as fp:\n fp.write(logdata)\n return", "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))", "def node_storage(self, node):\n def listdir(path, only_dirs=False):\n ents = node.account.sftp_client.listdir(path)\n if not only_dirs:\n return ents\n paths = map(lambda fn: (fn, os.path.join(path, fn)), ents)\n return [p[0] for p in paths if node.account.isdir(p[1])]\n\n store = NodeStorage(RedpandaService.DATA_DIR)\n for ns in listdir(store.data_dir, True):\n if ns == '.coprocessor_offset_checkpoints':\n continue\n ns = store.add_namespace(ns, os.path.join(store.data_dir, ns))\n for topic in listdir(ns.path):\n topic = ns.add_topic(topic, os.path.join(ns.path, topic))\n for num in listdir(topic.path):\n partition = topic.add_partition(\n num, node, os.path.join(topic.path, num))\n partition.add_files(listdir(partition.path))\n return store", "def snapshot(snapshot_type, result_q, time_delta):", "def _log_process(ip_or_url,\n username,\n password,\n database,\n series_name,\n port=8086,\n polling_interval=1,\n retention_duration=MEASUREMENTS_RETENTION_DURATION,\n **tags):\n logger = _logger()\n logger.info('Logging GPU to Database {}'.format(ip_or_url))\n\n kwargs = {'series_name': series_name,\n 'polling_interval': polling_interval,\n 'retention_duration': retention_duration}\n kwargs.update(tags)\n p = Process(target=_start_logger_process,\n args=(ip_or_url,\n port,\n username,\n password,\n database),\n kwargs=kwargs)\n p.start()\n yield p\n p.terminate()\n p.join()", "def test_start_new_log(self):\n old_log_file_name = self.device.log_file_name\n 
self.device.start_new_log(log_name_prefix=self.get_log_suffix())\n self.assertNotEqual(\n old_log_file_name, self.device.log_file_name,\n f\"Expected log file name to change from {old_log_file_name}\")\n self.assertTrue(\n os.path.exists(old_log_file_name),\n f\"Expected old log file name {old_log_file_name} to exist\")\n self.assertTrue(\n os.path.exists(self.device.log_file_name),\n f\"Expected new log file name {self.device.log_file_name} to exist\")" ]
[ "0.62486845", "0.60031223", "0.53603905", "0.53379613", "0.5311335", "0.52926815", "0.5282698", "0.52791435", "0.5279122", "0.525997", "0.52146244", "0.52052414", "0.5137806", "0.51062226", "0.5093556", "0.5093556", "0.5088433", "0.5079323", "0.50690126", "0.5067886", "0.5049035", "0.5046898", "0.50420696", "0.50331473", "0.5030297", "0.5030297", "0.5022431", "0.5016063", "0.49990737", "0.4972949", "0.4968265", "0.4965747", "0.49598062", "0.49484637", "0.49194708", "0.49178702", "0.4905472", "0.49004963", "0.48970413", "0.489522", "0.48909563", "0.4887827", "0.48821267", "0.4881891", "0.48785982", "0.48678872", "0.48652878", "0.48639703", "0.48581663", "0.48568252", "0.4854318", "0.48435727", "0.48408124", "0.48365796", "0.4836489", "0.48329967", "0.4823732", "0.4806852", "0.4805655", "0.48033065", "0.480192", "0.4796162", "0.4775947", "0.47686175", "0.4759035", "0.47575393", "0.47434506", "0.47358102", "0.47347638", "0.47308475", "0.47239348", "0.47207734", "0.47131824", "0.47117183", "0.47061667", "0.4702254", "0.4699498", "0.4697398", "0.46924555", "0.46898118", "0.4685107", "0.46848604", "0.46757394", "0.4671213", "0.46672994", "0.4666417", "0.46534035", "0.4653088", "0.46438026", "0.4637459", "0.4635404", "0.46326485", "0.46303207", "0.46281067", "0.4626461", "0.4609743", "0.45968643", "0.45966214", "0.45938614", "0.45920384", "0.45898995" ]
0.0
-1
> This operation is currently unavailable.
def modify_dbinstance_monitor_with_options( self, request: dds_20151201_models.ModifyDBInstanceMonitorRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.ModifyDBInstanceMonitorResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.granularity): query['Granularity'] = request.granularity if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='ModifyDBInstanceMonitor', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.ModifyDBInstanceMonitorResponse(), self.call_api(params, req, runtime) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def operation_not_found(self, name):\n raise OperationError(\"Operation '%s' not found\" % name)", "def test_commandRaisesIllegalOperationResponse(self):\n self.assertCommandExceptionResponse(\n imap4.IllegalOperation(\"operation\"),\n b\"001\", b\"NO Illegal operation: operation\\r\\n\",\n )", "def unavailable(self):\r\n\r\n self._available = False\r\n self.owner.trigger(\"on_unavailable\")", "def available(self):\n raise ClixxException(\"Not implemented.\")", "def operation_enabled(client, name):\n client.configuration.unstable_operations[snake_case(name)] = True", "def not_implemented(self):\n response.status = 501\n return {'message':'server was not able to complete this request'}", "def unavailable(self):\n print(\"\\n**Sorry this Service is unavailable**\\n\")\n self.get_input()", "def operation(self):\n pass", "def set_unavailable(self):\n self[\"available\"] = False", "def is_available():", "def _raise_performing_request_error(self, *args, **kwargs):", "def supports_operation(self, operation: str) -> bool:\n return True", "def async_mark_unavailable(self):\n self._available = False", "def test_component_update_available_NO(self):\n self.assertFalse(self.u.component_update_available())", "def fail_local_operation(operation, node, environment):\n run_operation(operation, node, environment, succeed=False)", "def check_availability(self):\n pass", "def error( response ) :\n\t\twarnings.warn(\"deprecated in 0.3.0, use not responseGood()\", DeprecationWarning)\n\t\treturn not Databank.responseGood( response )", "def unable_service(req):\n\tglobal active_\n \n\tactive_ = req.data\n\tres = SetBoolResponse()\n\tres.success = True\n\tres.message = 'Done!'\n\n\treturn res", "def test_parsingRaisesIllegalOperationResponse(self):\n self.assertParseExceptionResponse(\n imap4.IllegalOperation(\"operation\"),\n b\"001\", b\"NO Illegal operation: operation\\r\\n\",\n )", "def is_unrestricted(self):\n raise exceptions.NotImplementedError()", "def noaccess(self):\n self.assertEqual(self.client.get(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.post(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.delete(self.ENDPOINT).status_code, 403)", "def onJT808Operation(self):\n pass", "def _is_valid_fetch_operation(operation):\n if operation in FetchQuantity._supported_fetch_operations():\n return True\n else:\n return False", "def print_service_unavailable():\n if WithingsDataManager.service_available is not False:\n _LOGGER.error(\"Looks like the service is not available at the moment\")\n WithingsDataManager.service_available = False\n return True", "def unable(self):\n response.status = 400\n return {'message':'current state does not allow modification'}", "def error_impresion(self):\n self._info(\"error_impresion\")", "def Take_Off_Connection_Failed(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_pull_error(self):\n raise NotImplementedError", "def test_not_supported():\n assert get_accessory(None, State('demo.demo', 'on'), 2, config=None) \\\n is None", "async def test_get_device_device_unavailable(hass):\n with patch(\n \"axis.vapix.Vapix.request\", side_effect=axislib.RequestError\n ), pytest.raises(axis.errors.CannotConnect):\n await axis.device.get_device(hass, host=\"\", port=\"\", username=\"\", password=\"\")", "def test_neg_operate_with_command_invalid(self):\n key = (\"test\", \"demo\", 1)\n\n llist = [\n {\"op\": 
aerospike.OPERATOR_PREPEND, \"bin\": \"name\", \"val\": \"ram\"},\n {\"op\": 3, \"bin\": \"age\", \"val\": 3},\n {\"op\": aerospike.OPERATOR_READ, \"bin\": \"name\"},\n ]\n\n try:\n key, _, _ = self.as_connection.operate(key, llist)\n\n except e.ParamError as exception:\n assert exception.code == -2", "def test_get_distribution_unavailable_feature(self):\r\n url = reverse('get_distribution', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url, {'feature': 'robot-not-a-real-feature'})\r\n self.assertEqual(response.status_code, 400)", "def __init__(self):\n self._OPERATION = None", "def test_disabled_method(api_client):\n\n response = api_client().get(\"/anything/disabled_method\")\n assert response.status_code == 403", "def is_available(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def is_available(self):\n raise NotImplementedError", "def cbr_not_avalible():\n return \"CBR service is unavailable\", 503", "def InfraFailure(self):\n return recipe_api.InfraFailure", "def available(self):\n\t\traise NotImplementedError", "def _notYetImplemented(self, val=None):\n raise VimbaException(-1001)", "def Take_Off_Connected_But_Failed(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def is_not_used(self):\n pass", "def isOperational(self, straceOptions):\n return True", "def _order_update_not_supported():\n pecan.abort(405, u._(\"Order update is not supported.\"))", "def test_not_implemented(self, api_client):\n runner = CliRunner()\n expected_output = \"Error: 'account' subcommand is not implemented yet.\\n\"\n\n api_client.not_implemented.side_effect = RequestFailure(501)\n result = runner.invoke(subcommand.account)\n api_client.not_implemented.assert_called_with(\"account\")\n assert result.exit_code == 1\n assert result.output == expected_output", "def Error(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def __CheckOpen(self, operation):\n if self.__closed:\n raise ValueError('%s() on a closed stream is not permitted' %\n operation)", "def __CheckOpen(self, operation):\n if self.__closed:\n raise ValueError('%s() on a closed stream is not permitted' %\n operation)", "def test_unsupported_requests_fail(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 405)\n\n response = self.client.put(self.url)\n self.assertEqual(response.status_code, 405)\n\n response = self.client.patch(self.url)\n self.assertEqual(response.status_code, 405)", "def test_op_no_ticket(self):\n assert OP_NO_TICKET == 0x4000", "def ExecuteOpCommand(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_operation_old(operation_name):\n op = operations_api.get_operation(operation_name)\n return op", "def offering(self):\r\n raise NotImplementedError()", "def _default_handler(self, iq):\n raise XMPPError('service-unavailable')", "def Action(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def Action(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n 
context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def unknown_method(self, response):\n raise NoData", "def snmpqosqos_error_rename_not_implementedrate(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_error_rename_not_implementedrate\n\t\texcept Exception as e:\n\t\t\traise e", "def check_unsupported_ops(self, program):\n\n unsupported_ops = set()\n for block in program.blocks:\n for op in block.ops:\n if op.type == \"fetch\":\n continue\n if op.type not in _convert_map:\n unsupported_ops.add(op.type)\n if len(unsupported_ops) > 0:\n msg = \"The following operators are not supported for frontend Paddle: \"\n msg += \", \".join(unsupported_ops)\n raise tvm.error.OpNotImplemented(msg)", "def poll_instruction(self):\n raise NotImplementedError()", "def _onReceiveOperation(self, operation):\n pass", "def test_api_with_invalid_call(self):\n request = self.client.get('/stocks/addstock/', follow=True, secure=True)\n self.assertEqual(request.status_code, 405)", "def Control(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def method_not_allowed() :\n raise cherrypy.HTTPError(405, \"Method Not Allowed\")", "def test_endpoint_access_fail(self):\n url = reverse('users:activate-from-email', args=(1, 1))\n res = self.client.get(url)\n self.assertEqual(res.status_code, status.HTTP_503_SERVICE_UNAVAILABLE)", "def not_found(error):\n pass", "def _check_queryable(self):\n if not self._bucket:\n raise Exception('Bucket has not been selected')", "def wait_for_global_operation(self, operation):\n print('Waiting for %s.' % (operation))\n while True:\n result = self.compute.globalOperations().get(\n project=self.project,\n operation=operation).execute()\n if result['status'] == 'DONE':\n print(\"Done.\")\n if 'error' in result:\n print('Global operations error', result['error'])\n raise RegionOperationsError(result['error'])\n return result\n time.sleep(1)", "def ComputeEAvailable(self):\r\n pass", "def available_io(ctx, exchange):\n run_cmd(ctx, exchange)", "def test_cmd_cs_subscription_bad_action(self):\n bad_action = 'blahblah'\n\n result = self.runner.invoke(cli, ['subscription', bad_action])\n assert f\"invalid choice: {bad_action}\" in result.output\n assert result.exception", "def no_response(self):\n raise NotImplementedError", "def error(self):\n pass", "def opcode_unimplemented(opc):\r\n # FIXME this should be optional, like the implementation itself.\r\n return opc==\"DA\" or opc==\"XCHD\"", "def busy(self):\n pass", "def test_unknown_resource_under_service(self):\n raise NotImplementedError # FIXME", "def test_unsupported_action(self):\r\n self.xmodule.verify_oauth_body_sign = Mock()\r\n request = Request(self.environ)\r\n request.body = self.get_request_body({'action': 'wrongAction'})\r\n response = self.xmodule.grade_handler(request, '')\r\n real_response = self.get_response_values(response)\r\n expected_response = {\r\n 'action': None,\r\n 'code_major': 'unsupported',\r\n 'description': 'Target does not support the requested operation.',\r\n 'messageIdentifier': self.DEFAULTS['messageIdentifier'],\r\n }\r\n self.assertEqual(response.status_code, 200)\r\n self.assertDictEqual(expected_response, real_response)", "def isOp(self):\n return True", "def _create_method_not_allowed(self):\n body = self.server.create_error(\n 405, 'Method Not Allowed',\n f'No method \\'{self.path}\\' exist.',\n bad=True)\n 
self._write_response(405, body, content_type=CONTENT_TYPE_ERROR)", "def rpc_get(self, session, rpc, filter_or_none): # pylint: disable=W0613\n raise ncerror.OperationNotSupportedProtoError(rpc)", "def test_store_is_unavailable(self, mock_current_session):\n mock_store = mock.MagicMock()\n mock_store.is_available.return_value = False\n mock_current_session.return_value = mock_store\n with self.assertRaises(ServiceUnavailable):\n controllers.service_status()", "def GetOperation(name):\n client = GetClientInstance()\n messages = client.MESSAGES_MODULE\n request = messages.ApikeysOperationsGetRequest(name=name)\n try:\n return client.operations.Get(request)\n except (apitools_exceptions.HttpForbiddenError,\n apitools_exceptions.HttpNotFoundError) as e:\n exceptions.ReraiseError(e, exceptions.OperationErrorException)", "async def test_get_not_implemented(self):\n with self.assertRaises(NotImplementedError):\n await self.collection.get('x')", "def reject_waiting_call(self) -> None:", "def test_query_inventory_missing_not_found(self):\n resp = self.app.get('/inventories/query', query_string='status=used')\n self.assertEquals(resp.status_code, 404)", "def test_not_implemented(self, api_client):\n runner = CliRunner()\n expected_output = \"Error: 'alerts' subcommand is not implemented yet.\\n\"\n\n api_client.not_implemented.side_effect = RequestFailure(501)\n result = runner.invoke(subcommand.alerts)\n api_client.not_implemented.assert_called_with(\"alerts\")\n assert result.exit_code == 1\n assert result.output == expected_output", "def snmpqosqos_error_rename_not_implemented(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_error_rename_not_implemented\n\t\texcept Exception as e:\n\t\t\traise e", "def test_v1_deprecated(self):\n resp = self.app.get('/api/1/inf/esrs',\n headers={'X-Auth': self.token})\n\n status = resp.status_code\n expected = 404\n\n self.assertEqual(status, expected)", "def available(self):\n\t\t\treturn False", "def available(self):\n\t\t\treturn False", "def available(self):\n\t\t\treturn False", "def LastOperation(self) -> SocketAsyncOperation:", "def test_unauthenticated_service_blocked(self):\n raise NotImplementedError # FIXME", "def test_method_not_allowed(self):\n resp = self.app.post('/inventories/0')\n self.assertEqual(resp.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)", "def test_unsupported_action(self):\n self.xmodule.verify_oauth_body_sign = Mock()\n request = Request(self.environ)\n request.body = self.get_request_body({'action': 'wrongAction'})\n response = self.xmodule.grade_handler(request, '')\n real_response = self.get_response_values(response)\n expected_response = {\n 'action': None,\n 'code_major': 'unsupported',\n 'description': 'Target does not support the requested operation.',\n 'messageIdentifier': self.defaults['messageIdentifier'],\n }\n assert response.status_code == 200\n self.assertDictEqual(expected_response, real_response)", "def test_bad_method(self):\n\n request = service.get_request('GET', {})\n x = self.start_request_tests(request)\n # GET method not allowed\n self.assertEqual(x.status_code, 405)\n # TBD: check for informativeness\n json.dump(x.to_dict(), sys.stdout, indent=2)", "def test_neg_operate_with_no_parameters(self):\n with pytest.raises(TypeError) as typeError:\n self.as_connection.operate()\n assert \"argument 'key' (pos 1)\" in str(typeError.value)", "def is_available(self) -> bool:\n raise NotImplementedError() # pragma: nocover", "def post(self):\n self.not_supported()", "def forbidden():\n return HttpError(403)", "def 
Go_Up_Failed(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')" ]
[ "0.71143085", "0.6646724", "0.62888014", "0.6260634", "0.6095101", "0.6048084", "0.60210425", "0.59987384", "0.5990421", "0.58476347", "0.5830298", "0.57915527", "0.5750761", "0.5720391", "0.5715984", "0.5704048", "0.5699723", "0.56822145", "0.5653041", "0.5647547", "0.56409156", "0.5630924", "0.561037", "0.56083083", "0.5599652", "0.5591471", "0.55590475", "0.5541379", "0.55231774", "0.551466", "0.55117005", "0.54879093", "0.5486286", "0.5480235", "0.5445915", "0.5443322", "0.5441651", "0.5438167", "0.5434417", "0.5425321", "0.5418594", "0.5412842", "0.538964", "0.53824574", "0.5345291", "0.5337799", "0.5336056", "0.5336056", "0.53357196", "0.5318171", "0.53176045", "0.5317069", "0.5314204", "0.5314197", "0.53112787", "0.53112787", "0.5305375", "0.5305079", "0.52922577", "0.5291288", "0.52894974", "0.5288828", "0.5286483", "0.528157", "0.52797705", "0.52682984", "0.52653956", "0.52592576", "0.5253828", "0.5249391", "0.5243247", "0.523383", "0.52332413", "0.5223539", "0.5222919", "0.52162695", "0.52142215", "0.52060676", "0.5205763", "0.51995367", "0.5196073", "0.51831776", "0.5178991", "0.5170917", "0.51709133", "0.5163509", "0.5157858", "0.51570004", "0.5154816", "0.5154816", "0.5154816", "0.51497835", "0.5149004", "0.51487106", "0.5147121", "0.51414126", "0.5141173", "0.5138029", "0.5132038", "0.5126614", "0.512577" ]
0.0
-1
> operation is currently unavailable.
async def modify_dbinstance_monitor_with_options_async(
    self,
    request: dds_20151201_models.ModifyDBInstanceMonitorRequest,
    runtime: util_models.RuntimeOptions,
) -> dds_20151201_models.ModifyDBInstanceMonitorResponse:
    UtilClient.validate_model(request)
    query = {}
    if not UtilClient.is_unset(request.dbinstance_id):
        query['DBInstanceId'] = request.dbinstance_id
    if not UtilClient.is_unset(request.granularity):
        query['Granularity'] = request.granularity
    if not UtilClient.is_unset(request.owner_account):
        query['OwnerAccount'] = request.owner_account
    if not UtilClient.is_unset(request.owner_id):
        query['OwnerId'] = request.owner_id
    if not UtilClient.is_unset(request.resource_owner_account):
        query['ResourceOwnerAccount'] = request.resource_owner_account
    if not UtilClient.is_unset(request.resource_owner_id):
        query['ResourceOwnerId'] = request.resource_owner_id
    if not UtilClient.is_unset(request.security_token):
        query['SecurityToken'] = request.security_token
    req = open_api_models.OpenApiRequest(
        query=OpenApiUtilClient.query(query)
    )
    params = open_api_models.Params(
        action='ModifyDBInstanceMonitor',
        version='2015-12-01',
        protocol='HTTPS',
        pathname='/',
        method='POST',
        auth_type='AK',
        style='RPC',
        req_body_type='formData',
        body_type='json'
    )
    return TeaCore.from_map(
        dds_20151201_models.ModifyDBInstanceMonitorResponse(),
        await self.call_api_async(params, req, runtime)
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def operation_not_found(self, name):\n raise OperationError(\"Operation '%s' not found\" % name)", "def test_commandRaisesIllegalOperationResponse(self):\n self.assertCommandExceptionResponse(\n imap4.IllegalOperation(\"operation\"),\n b\"001\", b\"NO Illegal operation: operation\\r\\n\",\n )", "def unavailable(self):\r\n\r\n self._available = False\r\n self.owner.trigger(\"on_unavailable\")", "def available(self):\n raise ClixxException(\"Not implemented.\")", "def operation_enabled(client, name):\n client.configuration.unstable_operations[snake_case(name)] = True", "def not_implemented(self):\n response.status = 501\n return {'message':'server was not able to complete this request'}", "def unavailable(self):\n print(\"\\n**Sorry this Service is unavailable**\\n\")\n self.get_input()", "def operation(self):\n pass", "def set_unavailable(self):\n self[\"available\"] = False", "def is_available():", "def _raise_performing_request_error(self, *args, **kwargs):", "def supports_operation(self, operation: str) -> bool:\n return True", "def async_mark_unavailable(self):\n self._available = False", "def test_component_update_available_NO(self):\n self.assertFalse(self.u.component_update_available())", "def fail_local_operation(operation, node, environment):\n run_operation(operation, node, environment, succeed=False)", "def check_availability(self):\n pass", "def error( response ) :\n\t\twarnings.warn(\"deprecated in 0.3.0, use not responseGood()\", DeprecationWarning)\n\t\treturn not Databank.responseGood( response )", "def unable_service(req):\n\tglobal active_\n \n\tactive_ = req.data\n\tres = SetBoolResponse()\n\tres.success = True\n\tres.message = 'Done!'\n\n\treturn res", "def test_parsingRaisesIllegalOperationResponse(self):\n self.assertParseExceptionResponse(\n imap4.IllegalOperation(\"operation\"),\n b\"001\", b\"NO Illegal operation: operation\\r\\n\",\n )", "def is_unrestricted(self):\n raise exceptions.NotImplementedError()", "def noaccess(self):\n self.assertEqual(self.client.get(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.post(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.delete(self.ENDPOINT).status_code, 403)", "def onJT808Operation(self):\n pass", "def _is_valid_fetch_operation(operation):\n if operation in FetchQuantity._supported_fetch_operations():\n return True\n else:\n return False", "def print_service_unavailable():\n if WithingsDataManager.service_available is not False:\n _LOGGER.error(\"Looks like the service is not available at the moment\")\n WithingsDataManager.service_available = False\n return True", "def unable(self):\n response.status = 400\n return {'message':'current state does not allow modification'}", "def error_impresion(self):\n self._info(\"error_impresion\")", "def Take_Off_Connection_Failed(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_pull_error(self):\n raise NotImplementedError", "def test_not_supported():\n assert get_accessory(None, State('demo.demo', 'on'), 2, config=None) \\\n is None", "async def test_get_device_device_unavailable(hass):\n with patch(\n \"axis.vapix.Vapix.request\", side_effect=axislib.RequestError\n ), pytest.raises(axis.errors.CannotConnect):\n await axis.device.get_device(hass, host=\"\", port=\"\", username=\"\", password=\"\")", "def test_neg_operate_with_command_invalid(self):\n key = (\"test\", \"demo\", 1)\n\n llist = [\n {\"op\": 
aerospike.OPERATOR_PREPEND, \"bin\": \"name\", \"val\": \"ram\"},\n {\"op\": 3, \"bin\": \"age\", \"val\": 3},\n {\"op\": aerospike.OPERATOR_READ, \"bin\": \"name\"},\n ]\n\n try:\n key, _, _ = self.as_connection.operate(key, llist)\n\n except e.ParamError as exception:\n assert exception.code == -2", "def test_get_distribution_unavailable_feature(self):\r\n url = reverse('get_distribution', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url, {'feature': 'robot-not-a-real-feature'})\r\n self.assertEqual(response.status_code, 400)", "def __init__(self):\n self._OPERATION = None", "def test_disabled_method(api_client):\n\n response = api_client().get(\"/anything/disabled_method\")\n assert response.status_code == 403", "def is_available(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def is_available(self):\n raise NotImplementedError", "def cbr_not_avalible():\n return \"CBR service is unavailable\", 503", "def InfraFailure(self):\n return recipe_api.InfraFailure", "def available(self):\n\t\traise NotImplementedError", "def _notYetImplemented(self, val=None):\n raise VimbaException(-1001)", "def Take_Off_Connected_But_Failed(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def is_not_used(self):\n pass", "def isOperational(self, straceOptions):\n return True", "def _order_update_not_supported():\n pecan.abort(405, u._(\"Order update is not supported.\"))", "def test_not_implemented(self, api_client):\n runner = CliRunner()\n expected_output = \"Error: 'account' subcommand is not implemented yet.\\n\"\n\n api_client.not_implemented.side_effect = RequestFailure(501)\n result = runner.invoke(subcommand.account)\n api_client.not_implemented.assert_called_with(\"account\")\n assert result.exit_code == 1\n assert result.output == expected_output", "def Error(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_unsupported_requests_fail(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 405)\n\n response = self.client.put(self.url)\n self.assertEqual(response.status_code, 405)\n\n response = self.client.patch(self.url)\n self.assertEqual(response.status_code, 405)", "def __CheckOpen(self, operation):\n if self.__closed:\n raise ValueError('%s() on a closed stream is not permitted' %\n operation)", "def __CheckOpen(self, operation):\n if self.__closed:\n raise ValueError('%s() on a closed stream is not permitted' %\n operation)", "def test_op_no_ticket(self):\n assert OP_NO_TICKET == 0x4000", "def ExecuteOpCommand(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_operation_old(operation_name):\n op = operations_api.get_operation(operation_name)\n return op", "def _default_handler(self, iq):\n raise XMPPError('service-unavailable')", "def offering(self):\r\n raise NotImplementedError()", "def Action(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def Action(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n 
context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def unknown_method(self, response):\n raise NoData", "def snmpqosqos_error_rename_not_implementedrate(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_error_rename_not_implementedrate\n\t\texcept Exception as e:\n\t\t\traise e", "def check_unsupported_ops(self, program):\n\n unsupported_ops = set()\n for block in program.blocks:\n for op in block.ops:\n if op.type == \"fetch\":\n continue\n if op.type not in _convert_map:\n unsupported_ops.add(op.type)\n if len(unsupported_ops) > 0:\n msg = \"The following operators are not supported for frontend Paddle: \"\n msg += \", \".join(unsupported_ops)\n raise tvm.error.OpNotImplemented(msg)", "def poll_instruction(self):\n raise NotImplementedError()", "def _onReceiveOperation(self, operation):\n pass", "def test_api_with_invalid_call(self):\n request = self.client.get('/stocks/addstock/', follow=True, secure=True)\n self.assertEqual(request.status_code, 405)", "def Control(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def method_not_allowed() :\n raise cherrypy.HTTPError(405, \"Method Not Allowed\")", "def test_endpoint_access_fail(self):\n url = reverse('users:activate-from-email', args=(1, 1))\n res = self.client.get(url)\n self.assertEqual(res.status_code, status.HTTP_503_SERVICE_UNAVAILABLE)", "def not_found(error):\n pass", "def _check_queryable(self):\n if not self._bucket:\n raise Exception('Bucket has not been selected')", "def wait_for_global_operation(self, operation):\n print('Waiting for %s.' % (operation))\n while True:\n result = self.compute.globalOperations().get(\n project=self.project,\n operation=operation).execute()\n if result['status'] == 'DONE':\n print(\"Done.\")\n if 'error' in result:\n print('Global operations error', result['error'])\n raise RegionOperationsError(result['error'])\n return result\n time.sleep(1)", "def ComputeEAvailable(self):\r\n pass", "def available_io(ctx, exchange):\n run_cmd(ctx, exchange)", "def test_cmd_cs_subscription_bad_action(self):\n bad_action = 'blahblah'\n\n result = self.runner.invoke(cli, ['subscription', bad_action])\n assert f\"invalid choice: {bad_action}\" in result.output\n assert result.exception", "def error(self):\n pass", "def no_response(self):\n raise NotImplementedError", "def opcode_unimplemented(opc):\r\n # FIXME this should be optional, like the implementation itself.\r\n return opc==\"DA\" or opc==\"XCHD\"", "def busy(self):\n pass", "def test_unknown_resource_under_service(self):\n raise NotImplementedError # FIXME", "def test_unsupported_action(self):\r\n self.xmodule.verify_oauth_body_sign = Mock()\r\n request = Request(self.environ)\r\n request.body = self.get_request_body({'action': 'wrongAction'})\r\n response = self.xmodule.grade_handler(request, '')\r\n real_response = self.get_response_values(response)\r\n expected_response = {\r\n 'action': None,\r\n 'code_major': 'unsupported',\r\n 'description': 'Target does not support the requested operation.',\r\n 'messageIdentifier': self.DEFAULTS['messageIdentifier'],\r\n }\r\n self.assertEqual(response.status_code, 200)\r\n self.assertDictEqual(expected_response, real_response)", "def isOp(self):\n return True", "def _create_method_not_allowed(self):\n body = self.server.create_error(\n 405, 'Method Not Allowed',\n f'No method \\'{self.path}\\' exist.',\n bad=True)\n 
self._write_response(405, body, content_type=CONTENT_TYPE_ERROR)", "def rpc_get(self, session, rpc, filter_or_none): # pylint: disable=W0613\n raise ncerror.OperationNotSupportedProtoError(rpc)", "def test_store_is_unavailable(self, mock_current_session):\n mock_store = mock.MagicMock()\n mock_store.is_available.return_value = False\n mock_current_session.return_value = mock_store\n with self.assertRaises(ServiceUnavailable):\n controllers.service_status()", "def GetOperation(name):\n client = GetClientInstance()\n messages = client.MESSAGES_MODULE\n request = messages.ApikeysOperationsGetRequest(name=name)\n try:\n return client.operations.Get(request)\n except (apitools_exceptions.HttpForbiddenError,\n apitools_exceptions.HttpNotFoundError) as e:\n exceptions.ReraiseError(e, exceptions.OperationErrorException)", "async def test_get_not_implemented(self):\n with self.assertRaises(NotImplementedError):\n await self.collection.get('x')", "def test_query_inventory_missing_not_found(self):\n resp = self.app.get('/inventories/query', query_string='status=used')\n self.assertEquals(resp.status_code, 404)", "def reject_waiting_call(self) -> None:", "def test_not_implemented(self, api_client):\n runner = CliRunner()\n expected_output = \"Error: 'alerts' subcommand is not implemented yet.\\n\"\n\n api_client.not_implemented.side_effect = RequestFailure(501)\n result = runner.invoke(subcommand.alerts)\n api_client.not_implemented.assert_called_with(\"alerts\")\n assert result.exit_code == 1\n assert result.output == expected_output", "def snmpqosqos_error_rename_not_implemented(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_error_rename_not_implemented\n\t\texcept Exception as e:\n\t\t\traise e", "def test_v1_deprecated(self):\n resp = self.app.get('/api/1/inf/esrs',\n headers={'X-Auth': self.token})\n\n status = resp.status_code\n expected = 404\n\n self.assertEqual(status, expected)", "def available(self):\n\t\t\treturn False", "def available(self):\n\t\t\treturn False", "def available(self):\n\t\t\treturn False", "def test_unauthenticated_service_blocked(self):\n raise NotImplementedError # FIXME", "def test_method_not_allowed(self):\n resp = self.app.post('/inventories/0')\n self.assertEqual(resp.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)", "def LastOperation(self) -> SocketAsyncOperation:", "def test_unsupported_action(self):\n self.xmodule.verify_oauth_body_sign = Mock()\n request = Request(self.environ)\n request.body = self.get_request_body({'action': 'wrongAction'})\n response = self.xmodule.grade_handler(request, '')\n real_response = self.get_response_values(response)\n expected_response = {\n 'action': None,\n 'code_major': 'unsupported',\n 'description': 'Target does not support the requested operation.',\n 'messageIdentifier': self.defaults['messageIdentifier'],\n }\n assert response.status_code == 200\n self.assertDictEqual(expected_response, real_response)", "def test_bad_method(self):\n\n request = service.get_request('GET', {})\n x = self.start_request_tests(request)\n # GET method not allowed\n self.assertEqual(x.status_code, 405)\n # TBD: check for informativeness\n json.dump(x.to_dict(), sys.stdout, indent=2)", "def test_neg_operate_with_no_parameters(self):\n with pytest.raises(TypeError) as typeError:\n self.as_connection.operate()\n assert \"argument 'key' (pos 1)\" in str(typeError.value)", "def is_available(self) -> bool:\n raise NotImplementedError() # pragma: nocover", "def post(self):\n self.not_supported()", "def forbidden():\n return HttpError(403)", "def 
Go_Up_Failed(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')" ]
[ "0.7114301", "0.66467077", "0.6288743", "0.62601924", "0.60951746", "0.6047948", "0.6021189", "0.5998924", "0.59904593", "0.584738", "0.5830324", "0.57915527", "0.575041", "0.5720417", "0.5716025", "0.5704126", "0.569943", "0.568176", "0.56532323", "0.5647564", "0.5641508", "0.5630823", "0.5610483", "0.56087244", "0.5600127", "0.5591702", "0.5559254", "0.5541191", "0.5523608", "0.55145717", "0.55116284", "0.548836", "0.5486362", "0.54805213", "0.5445867", "0.5442969", "0.5441571", "0.5438054", "0.54341227", "0.54250973", "0.5418677", "0.54129267", "0.53894264", "0.53825575", "0.53450775", "0.5337698", "0.533638", "0.5335524", "0.5335524", "0.5317876", "0.5317385", "0.5316665", "0.53141713", "0.5314042", "0.53111535", "0.53111535", "0.53051543", "0.53048366", "0.5292243", "0.5290731", "0.5289553", "0.5289477", "0.5286381", "0.52813244", "0.52805096", "0.5268036", "0.52653116", "0.52590036", "0.52537304", "0.5248971", "0.52432305", "0.5233373", "0.52333236", "0.5223333", "0.52227056", "0.5216392", "0.52144456", "0.5206335", "0.5205849", "0.51984626", "0.51963687", "0.5182578", "0.5178418", "0.51710933", "0.51704454", "0.51632655", "0.51574504", "0.5157407", "0.51546234", "0.51546234", "0.51546234", "0.5149224", "0.5149197", "0.51491743", "0.5147335", "0.5141901", "0.5140922", "0.5137683", "0.51320446", "0.5126774", "0.5126025" ]
0.0
-1
> operation is currently unavailable.
def modify_dbinstance_monitor(
    self,
    request: dds_20151201_models.ModifyDBInstanceMonitorRequest,
) -> dds_20151201_models.ModifyDBInstanceMonitorResponse:
    runtime = util_models.RuntimeOptions()
    return self.modify_dbinstance_monitor_with_options(request, runtime)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def operation_not_found(self, name):\n raise OperationError(\"Operation '%s' not found\" % name)", "def test_commandRaisesIllegalOperationResponse(self):\n self.assertCommandExceptionResponse(\n imap4.IllegalOperation(\"operation\"),\n b\"001\", b\"NO Illegal operation: operation\\r\\n\",\n )", "def unavailable(self):\r\n\r\n self._available = False\r\n self.owner.trigger(\"on_unavailable\")", "def available(self):\n raise ClixxException(\"Not implemented.\")", "def operation_enabled(client, name):\n client.configuration.unstable_operations[snake_case(name)] = True", "def not_implemented(self):\n response.status = 501\n return {'message':'server was not able to complete this request'}", "def unavailable(self):\n print(\"\\n**Sorry this Service is unavailable**\\n\")\n self.get_input()", "def operation(self):\n pass", "def set_unavailable(self):\n self[\"available\"] = False", "def is_available():", "def _raise_performing_request_error(self, *args, **kwargs):", "def supports_operation(self, operation: str) -> bool:\n return True", "def async_mark_unavailable(self):\n self._available = False", "def test_component_update_available_NO(self):\n self.assertFalse(self.u.component_update_available())", "def fail_local_operation(operation, node, environment):\n run_operation(operation, node, environment, succeed=False)", "def check_availability(self):\n pass", "def error( response ) :\n\t\twarnings.warn(\"deprecated in 0.3.0, use not responseGood()\", DeprecationWarning)\n\t\treturn not Databank.responseGood( response )", "def unable_service(req):\n\tglobal active_\n \n\tactive_ = req.data\n\tres = SetBoolResponse()\n\tres.success = True\n\tres.message = 'Done!'\n\n\treturn res", "def test_parsingRaisesIllegalOperationResponse(self):\n self.assertParseExceptionResponse(\n imap4.IllegalOperation(\"operation\"),\n b\"001\", b\"NO Illegal operation: operation\\r\\n\",\n )", "def is_unrestricted(self):\n raise exceptions.NotImplementedError()", "def noaccess(self):\n self.assertEqual(self.client.get(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.post(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.delete(self.ENDPOINT).status_code, 403)", "def onJT808Operation(self):\n pass", "def _is_valid_fetch_operation(operation):\n if operation in FetchQuantity._supported_fetch_operations():\n return True\n else:\n return False", "def print_service_unavailable():\n if WithingsDataManager.service_available is not False:\n _LOGGER.error(\"Looks like the service is not available at the moment\")\n WithingsDataManager.service_available = False\n return True", "def unable(self):\n response.status = 400\n return {'message':'current state does not allow modification'}", "def error_impresion(self):\n self._info(\"error_impresion\")", "def Take_Off_Connection_Failed(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_pull_error(self):\n raise NotImplementedError", "def test_not_supported():\n assert get_accessory(None, State('demo.demo', 'on'), 2, config=None) \\\n is None", "async def test_get_device_device_unavailable(hass):\n with patch(\n \"axis.vapix.Vapix.request\", side_effect=axislib.RequestError\n ), pytest.raises(axis.errors.CannotConnect):\n await axis.device.get_device(hass, host=\"\", port=\"\", username=\"\", password=\"\")", "def test_neg_operate_with_command_invalid(self):\n key = (\"test\", \"demo\", 1)\n\n llist = [\n {\"op\": 
aerospike.OPERATOR_PREPEND, \"bin\": \"name\", \"val\": \"ram\"},\n {\"op\": 3, \"bin\": \"age\", \"val\": 3},\n {\"op\": aerospike.OPERATOR_READ, \"bin\": \"name\"},\n ]\n\n try:\n key, _, _ = self.as_connection.operate(key, llist)\n\n except e.ParamError as exception:\n assert exception.code == -2", "def test_get_distribution_unavailable_feature(self):\r\n url = reverse('get_distribution', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url, {'feature': 'robot-not-a-real-feature'})\r\n self.assertEqual(response.status_code, 400)", "def __init__(self):\n self._OPERATION = None", "def test_disabled_method(api_client):\n\n response = api_client().get(\"/anything/disabled_method\")\n assert response.status_code == 403", "def is_available(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def is_available(self):\n raise NotImplementedError", "def cbr_not_avalible():\n return \"CBR service is unavailable\", 503", "def InfraFailure(self):\n return recipe_api.InfraFailure", "def available(self):\n\t\traise NotImplementedError", "def _notYetImplemented(self, val=None):\n raise VimbaException(-1001)", "def Take_Off_Connected_But_Failed(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def is_not_used(self):\n pass", "def isOperational(self, straceOptions):\n return True", "def _order_update_not_supported():\n pecan.abort(405, u._(\"Order update is not supported.\"))", "def test_not_implemented(self, api_client):\n runner = CliRunner()\n expected_output = \"Error: 'account' subcommand is not implemented yet.\\n\"\n\n api_client.not_implemented.side_effect = RequestFailure(501)\n result = runner.invoke(subcommand.account)\n api_client.not_implemented.assert_called_with(\"account\")\n assert result.exit_code == 1\n assert result.output == expected_output", "def Error(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def __CheckOpen(self, operation):\n if self.__closed:\n raise ValueError('%s() on a closed stream is not permitted' %\n operation)", "def __CheckOpen(self, operation):\n if self.__closed:\n raise ValueError('%s() on a closed stream is not permitted' %\n operation)", "def test_unsupported_requests_fail(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 405)\n\n response = self.client.put(self.url)\n self.assertEqual(response.status_code, 405)\n\n response = self.client.patch(self.url)\n self.assertEqual(response.status_code, 405)", "def test_op_no_ticket(self):\n assert OP_NO_TICKET == 0x4000", "def ExecuteOpCommand(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_operation_old(operation_name):\n op = operations_api.get_operation(operation_name)\n return op", "def offering(self):\r\n raise NotImplementedError()", "def _default_handler(self, iq):\n raise XMPPError('service-unavailable')", "def Action(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def Action(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n 
context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def unknown_method(self, response):\n raise NoData", "def snmpqosqos_error_rename_not_implementedrate(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_error_rename_not_implementedrate\n\t\texcept Exception as e:\n\t\t\traise e", "def check_unsupported_ops(self, program):\n\n unsupported_ops = set()\n for block in program.blocks:\n for op in block.ops:\n if op.type == \"fetch\":\n continue\n if op.type not in _convert_map:\n unsupported_ops.add(op.type)\n if len(unsupported_ops) > 0:\n msg = \"The following operators are not supported for frontend Paddle: \"\n msg += \", \".join(unsupported_ops)\n raise tvm.error.OpNotImplemented(msg)", "def poll_instruction(self):\n raise NotImplementedError()", "def _onReceiveOperation(self, operation):\n pass", "def test_api_with_invalid_call(self):\n request = self.client.get('/stocks/addstock/', follow=True, secure=True)\n self.assertEqual(request.status_code, 405)", "def Control(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def method_not_allowed() :\n raise cherrypy.HTTPError(405, \"Method Not Allowed\")", "def test_endpoint_access_fail(self):\n url = reverse('users:activate-from-email', args=(1, 1))\n res = self.client.get(url)\n self.assertEqual(res.status_code, status.HTTP_503_SERVICE_UNAVAILABLE)", "def not_found(error):\n pass", "def _check_queryable(self):\n if not self._bucket:\n raise Exception('Bucket has not been selected')", "def wait_for_global_operation(self, operation):\n print('Waiting for %s.' % (operation))\n while True:\n result = self.compute.globalOperations().get(\n project=self.project,\n operation=operation).execute()\n if result['status'] == 'DONE':\n print(\"Done.\")\n if 'error' in result:\n print('Global operations error', result['error'])\n raise RegionOperationsError(result['error'])\n return result\n time.sleep(1)", "def ComputeEAvailable(self):\r\n pass", "def available_io(ctx, exchange):\n run_cmd(ctx, exchange)", "def test_cmd_cs_subscription_bad_action(self):\n bad_action = 'blahblah'\n\n result = self.runner.invoke(cli, ['subscription', bad_action])\n assert f\"invalid choice: {bad_action}\" in result.output\n assert result.exception", "def no_response(self):\n raise NotImplementedError", "def error(self):\n pass", "def opcode_unimplemented(opc):\r\n # FIXME this should be optional, like the implementation itself.\r\n return opc==\"DA\" or opc==\"XCHD\"", "def busy(self):\n pass", "def test_unknown_resource_under_service(self):\n raise NotImplementedError # FIXME", "def test_unsupported_action(self):\r\n self.xmodule.verify_oauth_body_sign = Mock()\r\n request = Request(self.environ)\r\n request.body = self.get_request_body({'action': 'wrongAction'})\r\n response = self.xmodule.grade_handler(request, '')\r\n real_response = self.get_response_values(response)\r\n expected_response = {\r\n 'action': None,\r\n 'code_major': 'unsupported',\r\n 'description': 'Target does not support the requested operation.',\r\n 'messageIdentifier': self.DEFAULTS['messageIdentifier'],\r\n }\r\n self.assertEqual(response.status_code, 200)\r\n self.assertDictEqual(expected_response, real_response)", "def isOp(self):\n return True", "def _create_method_not_allowed(self):\n body = self.server.create_error(\n 405, 'Method Not Allowed',\n f'No method \\'{self.path}\\' exist.',\n bad=True)\n 
self._write_response(405, body, content_type=CONTENT_TYPE_ERROR)", "def rpc_get(self, session, rpc, filter_or_none): # pylint: disable=W0613\n raise ncerror.OperationNotSupportedProtoError(rpc)", "def test_store_is_unavailable(self, mock_current_session):\n mock_store = mock.MagicMock()\n mock_store.is_available.return_value = False\n mock_current_session.return_value = mock_store\n with self.assertRaises(ServiceUnavailable):\n controllers.service_status()", "def GetOperation(name):\n client = GetClientInstance()\n messages = client.MESSAGES_MODULE\n request = messages.ApikeysOperationsGetRequest(name=name)\n try:\n return client.operations.Get(request)\n except (apitools_exceptions.HttpForbiddenError,\n apitools_exceptions.HttpNotFoundError) as e:\n exceptions.ReraiseError(e, exceptions.OperationErrorException)", "async def test_get_not_implemented(self):\n with self.assertRaises(NotImplementedError):\n await self.collection.get('x')", "def reject_waiting_call(self) -> None:", "def test_query_inventory_missing_not_found(self):\n resp = self.app.get('/inventories/query', query_string='status=used')\n self.assertEquals(resp.status_code, 404)", "def test_not_implemented(self, api_client):\n runner = CliRunner()\n expected_output = \"Error: 'alerts' subcommand is not implemented yet.\\n\"\n\n api_client.not_implemented.side_effect = RequestFailure(501)\n result = runner.invoke(subcommand.alerts)\n api_client.not_implemented.assert_called_with(\"alerts\")\n assert result.exit_code == 1\n assert result.output == expected_output", "def snmpqosqos_error_rename_not_implemented(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_error_rename_not_implemented\n\t\texcept Exception as e:\n\t\t\traise e", "def test_v1_deprecated(self):\n resp = self.app.get('/api/1/inf/esrs',\n headers={'X-Auth': self.token})\n\n status = resp.status_code\n expected = 404\n\n self.assertEqual(status, expected)", "def available(self):\n\t\t\treturn False", "def available(self):\n\t\t\treturn False", "def available(self):\n\t\t\treturn False", "def LastOperation(self) -> SocketAsyncOperation:", "def test_unauthenticated_service_blocked(self):\n raise NotImplementedError # FIXME", "def test_method_not_allowed(self):\n resp = self.app.post('/inventories/0')\n self.assertEqual(resp.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)", "def test_unsupported_action(self):\n self.xmodule.verify_oauth_body_sign = Mock()\n request = Request(self.environ)\n request.body = self.get_request_body({'action': 'wrongAction'})\n response = self.xmodule.grade_handler(request, '')\n real_response = self.get_response_values(response)\n expected_response = {\n 'action': None,\n 'code_major': 'unsupported',\n 'description': 'Target does not support the requested operation.',\n 'messageIdentifier': self.defaults['messageIdentifier'],\n }\n assert response.status_code == 200\n self.assertDictEqual(expected_response, real_response)", "def test_bad_method(self):\n\n request = service.get_request('GET', {})\n x = self.start_request_tests(request)\n # GET method not allowed\n self.assertEqual(x.status_code, 405)\n # TBD: check for informativeness\n json.dump(x.to_dict(), sys.stdout, indent=2)", "def test_neg_operate_with_no_parameters(self):\n with pytest.raises(TypeError) as typeError:\n self.as_connection.operate()\n assert \"argument 'key' (pos 1)\" in str(typeError.value)", "def is_available(self) -> bool:\n raise NotImplementedError() # pragma: nocover", "def post(self):\n self.not_supported()", "def forbidden():\n return HttpError(403)", "def 
Go_Up_Failed(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')" ]
[ "0.71143085", "0.6646724", "0.62888014", "0.6260634", "0.6095101", "0.6048084", "0.60210425", "0.59987384", "0.5990421", "0.58476347", "0.5830298", "0.57915527", "0.5750761", "0.5720391", "0.5715984", "0.5704048", "0.5699723", "0.56822145", "0.5653041", "0.5647547", "0.56409156", "0.5630924", "0.561037", "0.56083083", "0.5599652", "0.5591471", "0.55590475", "0.5541379", "0.55231774", "0.551466", "0.55117005", "0.54879093", "0.5486286", "0.5480235", "0.5445915", "0.5443322", "0.5441651", "0.5438167", "0.5434417", "0.5425321", "0.5418594", "0.5412842", "0.538964", "0.53824574", "0.5345291", "0.5337799", "0.5336056", "0.5336056", "0.53357196", "0.5318171", "0.53176045", "0.5317069", "0.5314204", "0.5314197", "0.53112787", "0.53112787", "0.5305375", "0.5305079", "0.52922577", "0.5291288", "0.52894974", "0.5288828", "0.5286483", "0.528157", "0.52797705", "0.52682984", "0.52653956", "0.52592576", "0.5253828", "0.5249391", "0.5243247", "0.523383", "0.52332413", "0.5223539", "0.5222919", "0.52162695", "0.52142215", "0.52060676", "0.5205763", "0.51995367", "0.5196073", "0.51831776", "0.5178991", "0.5170917", "0.51709133", "0.5163509", "0.5157858", "0.51570004", "0.5154816", "0.5154816", "0.5154816", "0.51497835", "0.5149004", "0.51487106", "0.5147121", "0.51414126", "0.5141173", "0.5138029", "0.5132038", "0.5126614", "0.512577" ]
0.0
-1
> operation is currently unavailable.
async def modify_dbinstance_monitor_async(
    self,
    request: dds_20151201_models.ModifyDBInstanceMonitorRequest,
) -> dds_20151201_models.ModifyDBInstanceMonitorResponse:
    runtime = util_models.RuntimeOptions()
    return await self.modify_dbinstance_monitor_with_options_async(request, runtime)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def operation_not_found(self, name):\n raise OperationError(\"Operation '%s' not found\" % name)", "def test_commandRaisesIllegalOperationResponse(self):\n self.assertCommandExceptionResponse(\n imap4.IllegalOperation(\"operation\"),\n b\"001\", b\"NO Illegal operation: operation\\r\\n\",\n )", "def unavailable(self):\r\n\r\n self._available = False\r\n self.owner.trigger(\"on_unavailable\")", "def available(self):\n raise ClixxException(\"Not implemented.\")", "def operation_enabled(client, name):\n client.configuration.unstable_operations[snake_case(name)] = True", "def not_implemented(self):\n response.status = 501\n return {'message':'server was not able to complete this request'}", "def unavailable(self):\n print(\"\\n**Sorry this Service is unavailable**\\n\")\n self.get_input()", "def operation(self):\n pass", "def set_unavailable(self):\n self[\"available\"] = False", "def is_available():", "def _raise_performing_request_error(self, *args, **kwargs):", "def supports_operation(self, operation: str) -> bool:\n return True", "def async_mark_unavailable(self):\n self._available = False", "def test_component_update_available_NO(self):\n self.assertFalse(self.u.component_update_available())", "def fail_local_operation(operation, node, environment):\n run_operation(operation, node, environment, succeed=False)", "def check_availability(self):\n pass", "def error( response ) :\n\t\twarnings.warn(\"deprecated in 0.3.0, use not responseGood()\", DeprecationWarning)\n\t\treturn not Databank.responseGood( response )", "def unable_service(req):\n\tglobal active_\n \n\tactive_ = req.data\n\tres = SetBoolResponse()\n\tres.success = True\n\tres.message = 'Done!'\n\n\treturn res", "def test_parsingRaisesIllegalOperationResponse(self):\n self.assertParseExceptionResponse(\n imap4.IllegalOperation(\"operation\"),\n b\"001\", b\"NO Illegal operation: operation\\r\\n\",\n )", "def is_unrestricted(self):\n raise exceptions.NotImplementedError()", "def noaccess(self):\n self.assertEqual(self.client.get(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.post(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.delete(self.ENDPOINT).status_code, 403)", "def onJT808Operation(self):\n pass", "def _is_valid_fetch_operation(operation):\n if operation in FetchQuantity._supported_fetch_operations():\n return True\n else:\n return False", "def print_service_unavailable():\n if WithingsDataManager.service_available is not False:\n _LOGGER.error(\"Looks like the service is not available at the moment\")\n WithingsDataManager.service_available = False\n return True", "def unable(self):\n response.status = 400\n return {'message':'current state does not allow modification'}", "def error_impresion(self):\n self._info(\"error_impresion\")", "def Take_Off_Connection_Failed(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_pull_error(self):\n raise NotImplementedError", "def test_not_supported():\n assert get_accessory(None, State('demo.demo', 'on'), 2, config=None) \\\n is None", "async def test_get_device_device_unavailable(hass):\n with patch(\n \"axis.vapix.Vapix.request\", side_effect=axislib.RequestError\n ), pytest.raises(axis.errors.CannotConnect):\n await axis.device.get_device(hass, host=\"\", port=\"\", username=\"\", password=\"\")", "def test_neg_operate_with_command_invalid(self):\n key = (\"test\", \"demo\", 1)\n\n llist = [\n {\"op\": 
aerospike.OPERATOR_PREPEND, \"bin\": \"name\", \"val\": \"ram\"},\n {\"op\": 3, \"bin\": \"age\", \"val\": 3},\n {\"op\": aerospike.OPERATOR_READ, \"bin\": \"name\"},\n ]\n\n try:\n key, _, _ = self.as_connection.operate(key, llist)\n\n except e.ParamError as exception:\n assert exception.code == -2", "def test_get_distribution_unavailable_feature(self):\r\n url = reverse('get_distribution', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url, {'feature': 'robot-not-a-real-feature'})\r\n self.assertEqual(response.status_code, 400)", "def __init__(self):\n self._OPERATION = None", "def test_disabled_method(api_client):\n\n response = api_client().get(\"/anything/disabled_method\")\n assert response.status_code == 403", "def is_available(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def is_available(self):\n raise NotImplementedError", "def cbr_not_avalible():\n return \"CBR service is unavailable\", 503", "def InfraFailure(self):\n return recipe_api.InfraFailure", "def available(self):\n\t\traise NotImplementedError", "def _notYetImplemented(self, val=None):\n raise VimbaException(-1001)", "def Take_Off_Connected_But_Failed(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def is_not_used(self):\n pass", "def isOperational(self, straceOptions):\n return True", "def _order_update_not_supported():\n pecan.abort(405, u._(\"Order update is not supported.\"))", "def test_not_implemented(self, api_client):\n runner = CliRunner()\n expected_output = \"Error: 'account' subcommand is not implemented yet.\\n\"\n\n api_client.not_implemented.side_effect = RequestFailure(501)\n result = runner.invoke(subcommand.account)\n api_client.not_implemented.assert_called_with(\"account\")\n assert result.exit_code == 1\n assert result.output == expected_output", "def Error(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_unsupported_requests_fail(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 405)\n\n response = self.client.put(self.url)\n self.assertEqual(response.status_code, 405)\n\n response = self.client.patch(self.url)\n self.assertEqual(response.status_code, 405)", "def __CheckOpen(self, operation):\n if self.__closed:\n raise ValueError('%s() on a closed stream is not permitted' %\n operation)", "def __CheckOpen(self, operation):\n if self.__closed:\n raise ValueError('%s() on a closed stream is not permitted' %\n operation)", "def test_op_no_ticket(self):\n assert OP_NO_TICKET == 0x4000", "def ExecuteOpCommand(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_operation_old(operation_name):\n op = operations_api.get_operation(operation_name)\n return op", "def _default_handler(self, iq):\n raise XMPPError('service-unavailable')", "def offering(self):\r\n raise NotImplementedError()", "def Action(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def Action(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n 
context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def unknown_method(self, response):\n raise NoData", "def snmpqosqos_error_rename_not_implementedrate(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_error_rename_not_implementedrate\n\t\texcept Exception as e:\n\t\t\traise e", "def check_unsupported_ops(self, program):\n\n unsupported_ops = set()\n for block in program.blocks:\n for op in block.ops:\n if op.type == \"fetch\":\n continue\n if op.type not in _convert_map:\n unsupported_ops.add(op.type)\n if len(unsupported_ops) > 0:\n msg = \"The following operators are not supported for frontend Paddle: \"\n msg += \", \".join(unsupported_ops)\n raise tvm.error.OpNotImplemented(msg)", "def poll_instruction(self):\n raise NotImplementedError()", "def _onReceiveOperation(self, operation):\n pass", "def test_api_with_invalid_call(self):\n request = self.client.get('/stocks/addstock/', follow=True, secure=True)\n self.assertEqual(request.status_code, 405)", "def Control(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def method_not_allowed() :\n raise cherrypy.HTTPError(405, \"Method Not Allowed\")", "def test_endpoint_access_fail(self):\n url = reverse('users:activate-from-email', args=(1, 1))\n res = self.client.get(url)\n self.assertEqual(res.status_code, status.HTTP_503_SERVICE_UNAVAILABLE)", "def not_found(error):\n pass", "def _check_queryable(self):\n if not self._bucket:\n raise Exception('Bucket has not been selected')", "def wait_for_global_operation(self, operation):\n print('Waiting for %s.' % (operation))\n while True:\n result = self.compute.globalOperations().get(\n project=self.project,\n operation=operation).execute()\n if result['status'] == 'DONE':\n print(\"Done.\")\n if 'error' in result:\n print('Global operations error', result['error'])\n raise RegionOperationsError(result['error'])\n return result\n time.sleep(1)", "def ComputeEAvailable(self):\r\n pass", "def available_io(ctx, exchange):\n run_cmd(ctx, exchange)", "def test_cmd_cs_subscription_bad_action(self):\n bad_action = 'blahblah'\n\n result = self.runner.invoke(cli, ['subscription', bad_action])\n assert f\"invalid choice: {bad_action}\" in result.output\n assert result.exception", "def no_response(self):\n raise NotImplementedError", "def error(self):\n pass", "def opcode_unimplemented(opc):\r\n # FIXME this should be optional, like the implementation itself.\r\n return opc==\"DA\" or opc==\"XCHD\"", "def busy(self):\n pass", "def test_unknown_resource_under_service(self):\n raise NotImplementedError # FIXME", "def test_unsupported_action(self):\r\n self.xmodule.verify_oauth_body_sign = Mock()\r\n request = Request(self.environ)\r\n request.body = self.get_request_body({'action': 'wrongAction'})\r\n response = self.xmodule.grade_handler(request, '')\r\n real_response = self.get_response_values(response)\r\n expected_response = {\r\n 'action': None,\r\n 'code_major': 'unsupported',\r\n 'description': 'Target does not support the requested operation.',\r\n 'messageIdentifier': self.DEFAULTS['messageIdentifier'],\r\n }\r\n self.assertEqual(response.status_code, 200)\r\n self.assertDictEqual(expected_response, real_response)", "def isOp(self):\n return True", "def _create_method_not_allowed(self):\n body = self.server.create_error(\n 405, 'Method Not Allowed',\n f'No method \\'{self.path}\\' exist.',\n bad=True)\n 
self._write_response(405, body, content_type=CONTENT_TYPE_ERROR)", "def rpc_get(self, session, rpc, filter_or_none): # pylint: disable=W0613\n raise ncerror.OperationNotSupportedProtoError(rpc)", "def test_store_is_unavailable(self, mock_current_session):\n mock_store = mock.MagicMock()\n mock_store.is_available.return_value = False\n mock_current_session.return_value = mock_store\n with self.assertRaises(ServiceUnavailable):\n controllers.service_status()", "def GetOperation(name):\n client = GetClientInstance()\n messages = client.MESSAGES_MODULE\n request = messages.ApikeysOperationsGetRequest(name=name)\n try:\n return client.operations.Get(request)\n except (apitools_exceptions.HttpForbiddenError,\n apitools_exceptions.HttpNotFoundError) as e:\n exceptions.ReraiseError(e, exceptions.OperationErrorException)", "async def test_get_not_implemented(self):\n with self.assertRaises(NotImplementedError):\n await self.collection.get('x')", "def test_query_inventory_missing_not_found(self):\n resp = self.app.get('/inventories/query', query_string='status=used')\n self.assertEquals(resp.status_code, 404)", "def reject_waiting_call(self) -> None:", "def test_not_implemented(self, api_client):\n runner = CliRunner()\n expected_output = \"Error: 'alerts' subcommand is not implemented yet.\\n\"\n\n api_client.not_implemented.side_effect = RequestFailure(501)\n result = runner.invoke(subcommand.alerts)\n api_client.not_implemented.assert_called_with(\"alerts\")\n assert result.exit_code == 1\n assert result.output == expected_output", "def snmpqosqos_error_rename_not_implemented(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_error_rename_not_implemented\n\t\texcept Exception as e:\n\t\t\traise e", "def test_v1_deprecated(self):\n resp = self.app.get('/api/1/inf/esrs',\n headers={'X-Auth': self.token})\n\n status = resp.status_code\n expected = 404\n\n self.assertEqual(status, expected)", "def available(self):\n\t\t\treturn False", "def available(self):\n\t\t\treturn False", "def available(self):\n\t\t\treturn False", "def LastOperation(self) -> SocketAsyncOperation:", "def test_method_not_allowed(self):\n resp = self.app.post('/inventories/0')\n self.assertEqual(resp.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)", "def test_unauthenticated_service_blocked(self):\n raise NotImplementedError # FIXME", "def test_unsupported_action(self):\n self.xmodule.verify_oauth_body_sign = Mock()\n request = Request(self.environ)\n request.body = self.get_request_body({'action': 'wrongAction'})\n response = self.xmodule.grade_handler(request, '')\n real_response = self.get_response_values(response)\n expected_response = {\n 'action': None,\n 'code_major': 'unsupported',\n 'description': 'Target does not support the requested operation.',\n 'messageIdentifier': self.defaults['messageIdentifier'],\n }\n assert response.status_code == 200\n self.assertDictEqual(expected_response, real_response)", "def test_bad_method(self):\n\n request = service.get_request('GET', {})\n x = self.start_request_tests(request)\n # GET method not allowed\n self.assertEqual(x.status_code, 405)\n # TBD: check for informativeness\n json.dump(x.to_dict(), sys.stdout, indent=2)", "def test_neg_operate_with_no_parameters(self):\n with pytest.raises(TypeError) as typeError:\n self.as_connection.operate()\n assert \"argument 'key' (pos 1)\" in str(typeError.value)", "def is_available(self) -> bool:\n raise NotImplementedError() # pragma: nocover", "def post(self):\n self.not_supported()", "def forbidden():\n return HttpError(403)", "def 
Go_Up_Failed(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')" ]
[ "0.7114355", "0.6646821", "0.6288642", "0.6260238", "0.60952985", "0.6048064", "0.60210884", "0.59990513", "0.5990331", "0.5847375", "0.58304346", "0.5791706", "0.57503676", "0.5720432", "0.57162", "0.5704081", "0.5699349", "0.56818974", "0.56532687", "0.5647537", "0.5641481", "0.56309116", "0.5610579", "0.56087375", "0.560004", "0.559173", "0.55592495", "0.55412585", "0.55235416", "0.55145705", "0.55117196", "0.54882133", "0.54865986", "0.5480472", "0.5445821", "0.5442911", "0.5441459", "0.5438194", "0.5434113", "0.54249257", "0.541868", "0.541299", "0.53896064", "0.53827393", "0.53452355", "0.5337806", "0.5336432", "0.5335532", "0.5335532", "0.531804", "0.53175443", "0.53167844", "0.5314073", "0.5314055", "0.53112406", "0.53112406", "0.5305223", "0.53047556", "0.5292282", "0.5290715", "0.5289617", "0.52894366", "0.5286471", "0.5281396", "0.5280378", "0.52680707", "0.52652085", "0.5259031", "0.5253671", "0.52489835", "0.52431715", "0.52333766", "0.52333593", "0.52235186", "0.5222791", "0.52164614", "0.5214372", "0.5206514", "0.5205911", "0.5198515", "0.5196325", "0.5182713", "0.51785153", "0.5171014", "0.5170479", "0.5163412", "0.5157552", "0.5157311", "0.5154641", "0.5154641", "0.5154641", "0.51494175", "0.51493305", "0.51492095", "0.5147266", "0.51419735", "0.5141082", "0.51376367", "0.51322263", "0.5126695", "0.51261103" ]
0.0
-1
This operation is applicable only to replica set instances and sharded cluster instances.
def modify_dbinstance_network_type_with_options(
    self,
    request: dds_20151201_models.ModifyDBInstanceNetworkTypeRequest,
    runtime: util_models.RuntimeOptions,
) -> dds_20151201_models.ModifyDBInstanceNetworkTypeResponse:
    UtilClient.validate_model(request)
    query = {}
    if not UtilClient.is_unset(request.classic_expired_days):
        query['ClassicExpiredDays'] = request.classic_expired_days
    if not UtilClient.is_unset(request.dbinstance_id):
        query['DBInstanceId'] = request.dbinstance_id
    if not UtilClient.is_unset(request.network_type):
        query['NetworkType'] = request.network_type
    if not UtilClient.is_unset(request.owner_account):
        query['OwnerAccount'] = request.owner_account
    if not UtilClient.is_unset(request.owner_id):
        query['OwnerId'] = request.owner_id
    if not UtilClient.is_unset(request.resource_owner_account):
        query['ResourceOwnerAccount'] = request.resource_owner_account
    if not UtilClient.is_unset(request.resource_owner_id):
        query['ResourceOwnerId'] = request.resource_owner_id
    if not UtilClient.is_unset(request.retain_classic):
        query['RetainClassic'] = request.retain_classic
    if not UtilClient.is_unset(request.security_token):
        query['SecurityToken'] = request.security_token
    if not UtilClient.is_unset(request.v_switch_id):
        query['VSwitchId'] = request.v_switch_id
    if not UtilClient.is_unset(request.vpc_id):
        query['VpcId'] = request.vpc_id
    if not UtilClient.is_unset(request.zone_id):
        query['ZoneId'] = request.zone_id
    req = open_api_models.OpenApiRequest(
        query=OpenApiUtilClient.query(query)
    )
    params = open_api_models.Params(
        action='ModifyDBInstanceNetworkType',
        version='2015-12-01',
        protocol='HTTPS',
        pathname='/',
        method='POST',
        auth_type='AK',
        style='RPC',
        req_body_type='formData',
        body_type='json'
    )
    return TeaCore.from_map(
        dds_20151201_models.ModifyDBInstanceNetworkTypeResponse(),
        self.call_api(params, req, runtime)
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def replicaof(self, *args, **kwargs) -> NoReturn:\n raise RedisClusterException(\"REPLICAOF is not supported in cluster mode\")", "def modifyMastersWithMultipleInstances(self):\n # Nothing to do\n pass", "def cluster(self):\n assert False", "def replica_set_name(self):\n ...", "def test_redis_increase_replica_count_usual_case():", "def test_patch_cluster_role(self):\n pass", "def mmo_execute_on_secondary_or_primary(self, mmo_connection, command, replicaset=\"all\", first_available_only=False): # TODO add execution database?\n cluster_command_output = []\n replsets_completed = set()\n all_replsets = set()\n repl_hosts = self.mmo_shard_servers(mmo_connection)\n for doc in repl_hosts:\n hostname, port, shard = doc[\"hostname\"], doc[\"port\"], doc[\"shard\"]\n all_replsets.add(shard)\n auth_dic = self.mmo_get_auth_details_from_connection(mmo_connection)\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"],\n auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"SECONDARY\" \\\n and (replicaset == \"all\" or replicaset == shard) \\\n and shard not in replsets_completed:\n secondary_found = True\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append(\n {\"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": command_output})\n #if first_available_only:\n replsets_completed.add(shard)\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": shard,\n \"command_output\": {\"Error\": \"This mongod process is not available\"}})\n else:\n raise excep\n # This is the case where there are no secondaries\n for missing_shard in (all_replsets^replsets_completed):\n for doc in repl_hosts:\n hostname, port, shard = doc[\"hostname\"], doc[\"port\"], doc[\"shard\"]\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"],\n auth_dic[\"authentication_database\"])\n if shard == missing_shard and self.mmo_replica_state(c)[\"name\"] == \"PRIMARY\":\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": command_output})\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": shard,\n \"command_output\": {\"Error\": \"This mongod process is not available\"}})\n else:\n raise excep\n return cluster_command_output", "def is_replica(self):\n if (\n self.replica\n and self.aip\n and not self.deleted\n and not self.sip\n and not self.dip\n ):\n return True\n return False", "def test_replace_cluster_role(self):\n pass", "def test_base_replica_repair_with_contention(self):\n self._base_replica_repair_test(fail_mv_lock=True)", "def mmo_execute_on_secondaries(self, mmo_connection, command, replicaset=\"all\", first_available_only=False): # TODO add execution database?\n cluster_command_output = []\n replsets_completed = []\n\n auth_dic = self.mmo_get_auth_details_from_connection(mmo_connection)\n\n if replicaset == self.config_server_repl_name: # Config server replset\n for doc in self.mmo_config_servers(mmo_connection):\n hostname, port = doc[\"hostname\"], doc[\"port\"]\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"],auth_dic[\"authentication_database\"])\n if 
self.mmo_replica_state(c)[\"name\"] == \"SECONDARY\":\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": command_output})\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": {\"Error\": \"mongod process is not up\"}})\n else:\n raise excep\n else:\n for doc in self.mmo_shard_servers(mmo_connection):\n hostname, port, shard = doc[\"hostname\"], doc[\"port\"], doc[\"shard\"]\n try:\n\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"], auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"SECONDARY\" \\\n and (replicaset == \"all\" or replicaset == shard)\\\n and shard not in replsets_completed:\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({ \"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": command_output })\n if first_available_only:\n replsets_completed.append(shard)\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": {\"Error\": \"This mongod process is not available\"}})\n else:\n raise excep\n return cluster_command_output", "def mmo_execute_on_primaries(self, mmo_connection, command, replicaset=\"all\"): # TODO add execution database?\n\n auth_dic = self.mmo_get_auth_details_from_connection(mmo_connection)\n\n cluster_command_output = []\n if replicaset == self.config_server_repl_name: # Config server replset\n for doc in self.mmo_config_servers(mmo_connection):\n hostname, port = doc[\"hostname\"], doc[\"port\"]\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"],auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"PRIMARY\":\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": command_output})\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": {\"Error\": \"mongod process is not up\"}})\n else:\n raise excep\n else:\n for doc in self.mmo_shard_servers(mmo_connection):\n hostname, port, shard = doc[\"hostname\"], doc[\"port\"], doc[\"shard\"]\n\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"], auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"PRIMARY\" and (replicaset == \"all\" or replicaset == shard):\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({ \"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": command_output })\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({ \"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": { \"Error\": \"mongod process is not up\" } })\n else:\n raise excep\n return cluster_command_output", "def prepare_replica_for_exchange(self, replica):\n pass", "def crash_safe_replication(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"crash_safe_replication\")", "def 
test_read_cluster_role(self):\n pass", "def test_replace_cluster_policy(self):\n pass", "def test_patch_cluster_policy(self):\n pass", "def dvs_instances_batch_mix_sg(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n self.show_step(2)\n net_1 = os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network is created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(3)\n self.show_step(4)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n self.icmp[\"security_group_rule\"][\"security_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"remote_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n self.icmp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n\n for sg in [sg1, sg2]:\n self.tcp[\"security_group_rule\"][\"security_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"remote_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n self.tcp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n\n # Add rules for ssh and ping\n os_conn.goodbye_security()\n _s_groups = os_conn.neutron.list_security_groups()['security_groups']\n _srv_tenant_id = os_conn.get_tenant(SERVTEST_TENANT).id\n default_sg = [sg for sg in _s_groups\n if sg['tenant_id'] == _srv_tenant_id and\n sg['name'] == 'default'][0]\n\n for step in (5, 9):\n self.show_step(step) # step 5, 9\n step += 1\n self.show_step(step) # step 6, 10\n step += 1\n\n openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=vm_count,\n security_groups=[sg1.name, sg2.name, default_sg['name']])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(step) # step 7, 11\n step += 1\n\n instances = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, instances)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair,\n timeout=60 * 5,\n interval=10)\n\n self.show_step(step) # step 8, 12\n step += 1\n\n for instance in instances:\n os_conn.nova.servers.delete(instance)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(instance)", "def upsert(self):\n\n if 
self.cluster:\n self.cluster.upsert()\n else:\n super().upsert()", "def delete_cluster(self):", "def use_read_replica_if_available(queryset):\r\n return queryset.using(\"read_replica\") if \"read_replica\" in settings.DATABASES else queryset", "def failover_replica(self) -> 'outputs.InstanceFailoverReplicaResponse':\n return pulumi.get(self, \"failover_replica\")", "def test_patch_hyperflex_cluster(self):\n pass", "def replica_catalog(train_tweets_path, val_tweets_path, test_tweets_path, dataset_images, EMBEDDING_BASE_PATH):\n rc = ReplicaCatalog()\n http_location = \"https://workflow.isi.edu/Panorama/Data/CrisisComputing\"\n\n # list of input file objects\n input_images = []\n\n # Adding Images to the replica catalogue\n for image_path in dataset_images:\n name = image_path[1].split(\"/\")[-1]\n image_file = File(name)\n input_images.append(image_file)\n \n path_split = image_path[0].split(\"/\")\n #rc.add_replica(\"local\", image_file, image_path[0])\n rc.add_replica(\"isi\", image_file, os.path.join(http_location, path_split[-2], path_split[-1]))\n\n \n glove_embeddings = File('glove.twitter.27B.200d.txt')\n \n # File objects for train, val and test tweets csv\n train_tweets_name = File(train_tweets_path.split('/')[-1])\n val_tweets_name = File(val_tweets_path.split('/')[-1])\n test_tweets_name = File(test_tweets_path.split('/')[-1])\n \n #rc.add_replica(\"local\", train_tweets_name, train_tweets_path)\n #rc.add_replica(\"local\", val_tweets_name, val_tweets_path)\n #rc.add_replica(\"local\", test_tweets_name, test_tweets_path)\n #rc.add_replica(\"local\", glove_embeddings, os.path.join(os.getcwd(), os.path.join(EMBEDDING_BASE_PATH, GLOVE_EMBEDDING_FILE))) \n \n path_split = train_tweets_path.split('/')\n rc.add_replica(\"isi\", train_tweets_name, os.path.join(http_location, path_split[-2], path_split[-1]))\n path_split = val_tweets_path.split('/')\n rc.add_replica(\"isi\", val_tweets_name, os.path.join(http_location, path_split[-2], path_split[-1]))\n path_split = test_tweets_path.split('/')\n rc.add_replica(\"isi\", test_tweets_name, os.path.join(http_location, path_split[-2], path_split[-1]))\n \n rc.add_replica(\"isi\", glove_embeddings, os.path.join(http_location, \"glove_twitter\", GLOVE_EMBEDDING_FILE)) \n rc.write()\n\n return input_images, train_tweets_name, val_tweets_name, test_tweets_name, glove_embeddings", "def test_replace_cluster_resource_quota(self):\n pass", "def replicas(self, replicas):\n\n self._replicas = replicas", "def index(self):\n\n if self.cluster:\n self.cluster.index()\n else:\n super().index()", "def cluster_replicate(\n self, target_nodes: \"TargetNodesT\", node_id: str\n ) -> ResponseT:\n return self.execute_command(\n \"CLUSTER REPLICATE\", node_id, target_nodes=target_nodes\n )", "def replica(self) -> str:\n return pulumi.get(self, \"replica\")", "def share_replica_delete(context, share_replica_id, session=None,\n need_to_update_usages=True):\n session = session or get_session()\n\n share_instance_delete(context, share_replica_id, session=session,\n need_to_update_usages=need_to_update_usages)", "def test_snat_with_master_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def 
test_update_hyperflex_cluster(self):\n pass", "def replication(self):\n return self._replication", "def test_replace_cluster_resource_quota_status(self):\n pass", "def test_read_cluster_policy(self):\n pass", "def _mix_replicas(self):\n logger.debug(\"Mixing replicas (does nothing for MultiStateSampler)...\")\n\n # Reset storage to keep track of swap attempts this iteration.\n self._n_accepted_matrix[:, :] = 0\n self._n_proposed_matrix[:, :] = 0\n\n # Determine fraction of swaps accepted this iteration.\n n_swaps_proposed = self._n_proposed_matrix.sum()\n n_swaps_accepted = self._n_accepted_matrix.sum()\n swap_fraction_accepted = 0.0\n if n_swaps_proposed > 0:\n swap_fraction_accepted = n_swaps_accepted / n_swaps_proposed # Python 3 uses true division for /\n logger.debug(\"Accepted {}/{} attempted swaps ({:.1f}%)\".format(n_swaps_accepted, n_swaps_proposed,\n swap_fraction_accepted * 100.0))\n return self._replica_thermodynamic_states", "def test_patch_cluster_resource_quota(self):\n pass", "def is_replicated():\n if tf.distribute.has_strategy() and tf.distribute.get_replica_context():\n return tf.distribute.get_strategy().num_replicas_in_sync > 1\n return get_tf_replicator() is not None or is_tpu_replicated()", "def share_replicas_get_all(context, with_share_data=False,\n with_share_server=True, session=None):\n session = session or get_session()\n\n result = _share_replica_get_with_filters(\n context, with_share_server=with_share_server, session=session).all()\n\n if with_share_data:\n result = _set_instances_share_data(context, result, session)\n\n return result", "def check_smartstack_replication_for_instance(\n instance_config,\n expected_count,\n smartstack_replication_checker,\n):\n\n crit_threshold = instance_config.get_replication_crit_percentage()\n\n log.info('Checking instance %s in smartstack', instance_config.job_id)\n smartstack_replication_info = \\\n smartstack_replication_checker.get_replication_for_instance(instance_config)\n\n log.debug('Got smartstack replication info for %s: %s' %\n (instance_config.job_id, smartstack_replication_info))\n\n if len(smartstack_replication_info) == 0:\n status = pysensu_yelp.Status.CRITICAL\n output = (\n 'Service %s has no Smartstack replication info. 
Make sure the discover key in your smartstack.yaml '\n 'is valid!\\n'\n ) % instance_config.job_id\n log.error(output)\n else:\n expected_count_per_location = int(expected_count / len(smartstack_replication_info))\n output = ''\n output_critical = ''\n output_ok = ''\n under_replication_per_location = []\n\n for location, available_backends in sorted(smartstack_replication_info.items()):\n num_available_in_location = available_backends.get(instance_config.job_id, 0)\n under_replicated, ratio = is_under_replicated(\n num_available_in_location, expected_count_per_location, crit_threshold,\n )\n if under_replicated:\n output_critical += '- Service %s has %d out of %d expected instances in %s (CRITICAL: %d%%)\\n' % (\n instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,\n )\n else:\n output_ok += '- Service %s has %d out of %d expected instances in %s (OK: %d%%)\\n' % (\n instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,\n )\n under_replication_per_location.append(under_replicated)\n\n output += output_critical\n if output_critical and output_ok:\n output += '\\n\\n'\n output += 'The following locations are OK:\\n'\n output += output_ok\n\n if any(under_replication_per_location):\n status = pysensu_yelp.Status.CRITICAL\n output += (\n \"\\n\\n\"\n \"What this alert means:\\n\"\n \"\\n\"\n \" This replication alert means that a SmartStack powered loadbalancer (haproxy)\\n\"\n \" doesn't have enough healthy backends. Not having enough healthy backends\\n\"\n \" means that clients of that service will get 503s (http) or connection refused\\n\"\n \" (tcp) when trying to connect to it.\\n\"\n \"\\n\"\n \"Reasons this might be happening:\\n\"\n \"\\n\"\n \" The service may simply not have enough copies or it could simply be\\n\"\n \" unhealthy in that location. There also may not be enough resources\\n\"\n \" in the cluster to support the requested instance count.\\n\"\n \"\\n\"\n \"Things you can do:\\n\"\n \"\\n\"\n \" * You can view the logs for the job with:\\n\"\n \" paasta logs -s %(service)s -i %(instance)s -c %(cluster)s\\n\"\n \"\\n\"\n \" * Fix the cause of the unhealthy service. 
Try running:\\n\"\n \"\\n\"\n \" paasta status -s %(service)s -i %(instance)s -c %(cluster)s -vv\\n\"\n \"\\n\"\n \" * Widen SmartStack discovery settings\\n\"\n \" * Increase the instance count\\n\"\n \"\\n\"\n ) % {\n 'service': instance_config.service,\n 'instance': instance_config.instance,\n 'cluster': instance_config.cluster,\n }\n log.error(output)\n else:\n status = pysensu_yelp.Status.OK\n log.info(output)\n send_event(instance_config=instance_config, status=status, output=output)", "def crash_safe_replication_enabled(self) -> bool:\n return pulumi.get(self, \"crash_safe_replication_enabled\")", "def test_create_cluster_role(self):\n pass", "def share_replica_update(context, share_replica_id, values,\n with_share_data=False, session=None):\n session = session or get_session()\n\n with session.begin():\n _ensure_availability_zone_exists(context, values, session,\n strict=False)\n updated_share_replica = _share_instance_update(\n context, share_replica_id, values, session=session)\n\n if with_share_data:\n updated_share_replica = _set_instances_share_data(\n context, updated_share_replica, session)[0]\n\n return updated_share_replica", "def test_index_nas_shares_by_pool(self):\n pass", "def replicas(self) -> int:\n return pulumi.get(self, \"replicas\")", "def replicas(self) -> int:\n return pulumi.get(self, \"replicas\")", "def replicas(self) -> int:\n return pulumi.get(self, \"replicas\")", "def replicas(self) -> int:\n return pulumi.get(self, \"replicas\")", "def replicas(self) -> int:\n return pulumi.get(self, \"replicas\")", "def test_patch_cluster_resource_quota_status(self):\n pass", "def mmo_replicaset_conf(self, mmo_connection):\n command = {\"replSetGetConfig\" : 1}\n return self.mmo_execute_on_primaries(mmo_connection, command)", "def run(self, image, replicas, scale_replicas, command=None,\n status_wait=True):\n namespace = self.choose_namespace()\n\n name = self.client.create_statefulset(\n namespace=namespace,\n replicas=replicas,\n image=image,\n command=command,\n status_wait=status_wait\n )\n\n self.client.scale_statefulset(\n name,\n namespace=namespace,\n replicas=scale_replicas,\n status_wait=status_wait\n )\n\n self.client.scale_statefulset(\n name,\n namespace=namespace,\n replicas=replicas,\n status_wait=status_wait\n )\n\n self.client.delete_statefulset(\n name=name,\n namespace=namespace,\n status_wait=status_wait\n )", "def test_list_cluster_role(self):\n pass", "def failover_replica(self) -> pulumi.Output['outputs.InstanceFailoverReplicaResponse']:\n return pulumi.get(self, \"failover_replica\")", "def _base_replica_repair_test(self, fail_mv_lock=False):\n\n self.prepare(rf=3)\n node1, node2, node3 = self.cluster.nodelist()\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Write initial data')\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i))\n\n self._replay_batchlogs()\n\n logger.debug('Verify the data in the MV with CL=ALL')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0],\n cl=ConsistencyLevel.ALL\n )\n\n logger.debug('Shutdown node1')\n node1.stop(wait_other_notice=True)\n 
logger.debug('Delete node1 data')\n node1.clear(clear_all=True)\n\n jvm_args = []\n if fail_mv_lock:\n if self.cluster.version() >= LooseVersion('3.10'): # CASSANDRA-10134\n jvm_args = ['-Dcassandra.allow_unsafe_replace=true', '-Dcassandra.replace_address={}'.format(node1.address())]\n jvm_args.append(\"-Dcassandra.test.fail_mv_locks_count=1000\")\n # this should not make Keyspace.apply throw WTE on failure to acquire lock\n node1.set_configuration_options(values={'write_request_timeout_in_ms': 100})\n logger.debug('Restarting node1 with jvm_args={}'.format(jvm_args))\n node1.start(wait_for_binary_proto=True, jvm_args=jvm_args)\n logger.debug('Shutdown node2 and node3')\n node2.stop(wait_other_notice=True)\n node3.stop(wait_other_notice=True)\n\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n\n logger.debug('Verify that there is no data on node1')\n for i in range(1000):\n assert_none(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i)\n )\n\n logger.debug('Restarting node2 and node3')\n node2.start(wait_for_binary_proto=True)\n node3.start(wait_for_binary_proto=True)\n\n # Just repair the base replica\n logger.debug('Starting repair on node1')\n node1.nodetool(\"repair ks t\")\n\n logger.debug('Verify data with cl=ALL')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )", "def get_replica_ips(self):\n return self.membership", "def initialize_replicas(self):\n try:\n self.replicas+1\n except:\n print \"Ensemble MD Toolkit Error: Number of replicas must be \\\n defined for pattern ReplicaExchange!\"\n raise\n\n\n replicas = []\n N = self.replicas\n for k in range(N):\n r = ReplicaP(k)\n replicas.append(r)\n\n return replicas", "def run(self):\n client = self._get_client()\n metadata = self._metadata\n\n if str(metadata.get('cluster')) == str(self._cluster_id):\n if str(self._uuid) == str(self._node_id):\n #hypervisor_hostname = client.servers.find(\n # id=self._uuid).to_dict()['OS-EXT-SRV-ATTR:hypervisor_hostname']\n hypervisor_hostname = client.servers.find(\n id=self._uuid).to_dict()['OS-EXT-SRV-ATTR:host']\n data = {\n 'migrate': True,\n 'uuid': self._uuid,\n 'hypervisor_hostname': hypervisor_hostname,\n 'flavor_id': self._flavor\n }\n return Result(data)\n\n data = {\n 'migrate': False,\n 'uuid': self._uuid,\n 'hypervisor_hostname': '',\n 'flavor_id':self._flavor\n }\n return Result(data)", "def replace_with_insufficient_replicas_test(self):\n debug(\"Starting cluster with 3 nodes.\")\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n if DISABLE_VNODES:\n num_tokens = 1\n else:\n # a little hacky but grep_log returns the whole line...\n num_tokens = int(node3.get_conf_option('num_tokens'))\n\n debug(\"testing with num_tokens: {}\".format(num_tokens))\n\n debug(\"Inserting Data...\")\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=2)'])\n\n # stop node to replace\n debug(\"Stopping node to replace.\")\n node3.stop(wait_other_notice=True)\n\n # stop other replica\n debug(\"Stopping node2 (other replica)\")\n node2.stop(wait_other_notice=True)\n\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n cluster.add(node4, False)\n 
node4.start(replace_address='127.0.0.3', wait_for_binary_proto=False, wait_other_notice=False)\n\n # replace should fail due to insufficient replicas\n node4.watch_log_for(\"Unable to find sufficient sources for streaming range\")\n assert_not_running(node4)", "def dvs_instances_one_group(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(2)\n self.show_step(3)\n openstack.create_instances(os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n vm_count=vm_count,\n security_groups=[security_group.name])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(4)\n srv_list = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, srv_list)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair)\n\n self.show_step(5)\n for srv in srv_list:\n os_conn.nova.servers.delete(srv)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(srv)", "def scale_app(self, name, replicas):\n raise NotImplementedError", "def check_replica_primary(con,host, warning, critical,perf_data):\n if warning is None and critical is None:\n warning=1\n warning=warning or 2\n critical=critical or 2\n\n primary_status=0\n message=\"Primary server has not changed\"\n db=con[\"nagios\"]\n data=get_server_status(con)\n current_primary=data['repl'].get('primary')\n saved_primary=get_stored_primary_server_name(db)\n if current_primary is None:\n current_primary = \"None\"\n if saved_primary is None:\n saved_primary = \"None\"\n if current_primary != saved_primary:\n last_primary_server_record = {\"server\": current_primary}\n db.last_primary_server.update({\"_id\": \"last_primary\"}, {\"$set\" : last_primary_server_record} , upsert=True, safe=True)\n message = \"Primary server has changed from %s to %s\" % (saved_primary, current_primary)\n primary_status=1\n return check_levels(primary_status,warning,critical,message)", "def test_delete_collection_cluster_policy(self):\n pass", "def _load_cluster(self):", "def replica_configuration(self) -> 'outputs.ReplicaConfigurationResponse':\n return pulumi.get(self, \"replica_configuration\")", "def test_delete_cluster_role(self):\n pass", "def _standby_clone():\n # manualy:\n # $ mkdir -p /var/lib/postgresql/9.1/testscluster/\n # $ rsync -avz --rsh='ssh -p2222' root@12.34.56.789:/var/lib/postgresql/9.1/testscluster/ /var/lib/postgresql/9.1/testscluster/\n\n with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):\n puts(green('Start cloning the master'))\n repmgr_clone_command = 'repmgr -D %(slave_pgdata_path)s -d %(sync_db)s -p %(cluster_port)s -U %(sync_user)s -R postgres --verbose standby clone %(pgmaster_ip)s' % env\n puts(green(repmgr_clone_command))\n puts(\"-\" * 40)\n res = sudo(repmgr_clone_command, user='postgres')\n if 'Can not connect to the 
remote host' in res or 'Connection to database failed' in res:\n puts(\"-\" * 40)\n puts(green(repmgr_clone_command))\n puts(\"-\" * 40)\n puts(\"Master server is %s reachable.\" % red(\"NOT\"))\n puts(\"%s you can try to CLONE the slave manually [%s]:\" % (green(\"BUT\"), red(\"at your own risk\")))\n puts(\"On the slave server:\")\n puts(\"$ sudo -u postgres rsync -avz --rsh='ssh -p%(master_ssh_port)s' postgres@%(pgmaster_ip)s:%(master_pgdata_path)s %(slave_pgdata_path)s --exclude=pg_xlog* --exclude=pg_control --exclude=*.pid\" % env)\n puts(\"Here:\")\n puts(\"$ fab <cluster_task_name> finish_configuring_slave\")\n abort(\"STOP...\")", "def num_replicas_per_shard(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"num_replicas_per_shard\")", "def num_replicas_per_shard(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"num_replicas_per_shard\")", "def num_replicas_per_shard(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"num_replicas_per_shard\")", "def main():\n rs = redshift(config_file=CONFIG_FILENAME)\n \n # check if cluster already available\n try:\n clust_avail = check_available(rs)\n except rs_client.exceptions.ClusterNotFoundFault:\n clust_avail = False\n\n # if cluster not available, create it\n if not clust_avail:\n create_cluster(rs) \n \n print(f'Cluster is available. Cluster information: \\n{rs.get_cluster_info()}')", "def test_modify_storage_group_srdf_set_consistency_enable(self):\n if not self.run_consistency_enable_check():\n self.skipTest(\n 'Skip test_modify_storage_group_srdf_set_consistency_enable '\n 'This fix is in V9.2.1.7')\n sg_name, srdf_group_number, local_volume, remote_volume = (\n self.create_rdf_sg())\n self.replication.modify_storage_group_srdf(\n storage_group_id=sg_name, action='setmode',\n srdf_group_number=srdf_group_number,\n options={'setMode': {'mode': 'Asynchronous'}})\n status = self.replication.modify_storage_group_srdf(\n storage_group_id=sg_name, srdf_group_number=srdf_group_number,\n action=\"EnableConsistency\")\n self.assertEqual('Enabled', status.get('consistency_protection'))\n disable_status = self.replication.modify_storage_group_srdf(\n storage_group_id=sg_name, srdf_group_number=srdf_group_number,\n action=\"DisableConsistency\")\n self.assertEqual(\n 'Disabled', disable_status.get('consistency_protection'))", "def modify_rds_security_group(payload):\n # version = payload.get(\"version\")\n rds_ids = payload.pop(\"resource_id\")\n sg_ids = payload.pop(\"sg_id\")\n apply_action = \"GrantSecurityGroup\"\n remove_action = \"RemoveSecurityGroup\"\n check_instance_security_action = \"DescribeSecurityGroupByInstance\"\n version = payload.get(\"version\")\n result_data = {}\n\n succ, resp = get_ha_rds_backend_instance_info(payload)\n if not succ:\n return resp\n rds_2_instance = {rds: instance for instance, rds in resp.items()}\n\n if len(sg_ids) > 1:\n return console_response(\n SecurityErrorCode.ONE_SECURITY_PER_INSTANCE_ERROR, \"modify failed\")\n sg_id = sg_ids[0]\n\n code = 0\n msg = 'Success'\n for rds_id in rds_ids:\n sg_results_succ = []\n sg = RdsSecurityGroupModel.get_security_by_id(sg_id=sg_id)\n sg_uuid = sg.uuid\n visible_rds_record = RdsModel.get_rds_by_id(rds_id=rds_id)\n if visible_rds_record.rds_type == 'ha':\n rds_group = visible_rds_record.rds_group\n rds_records = RdsModel.get_rds_records_by_group(rds_group)\n else:\n rds_records = []\n for rds_record in rds_records:\n rds_ins_uuid = rds_2_instance.get(rds_record.uuid)\n\n payload.update(\n {\"action\": 
check_instance_security_action, \"version\": version,\n \"server\": rds_ins_uuid})\n # check_resp = api.get(payload=payload, timeout=10)\n check_resp = api.get(payload=payload)\n if check_resp.get(\"code\") != 0:\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = check_resp.get(\"msg\")\n continue\n\n # if the instance already has a security group, remove it\n if check_resp[\"data\"][\"total_count\"] > 0:\n old_sg_uuid = check_resp[\"data\"][\"ret_set\"][0][\"id\"]\n payload.update({\"action\": remove_action, \"version\": version,\n \"server\": rds_ins_uuid,\n \"security_group\": old_sg_uuid})\n # remove_resp = api.get(payload=payload, timeout=10)\n remove_resp = api.get(payload=payload)\n if remove_resp.get(\"code\") != 0:\n logger.debug(\"the resp of removing the old securty group is:\" +\n str(remove_resp))\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = remove_resp.get(\"msg\")\n continue\n else:\n rds_record.sg = None\n rds_record.save()\n\n # grant the new security group to the instance\n payload.update({\"action\": apply_action, \"version\": version,\n \"server\": rds_ins_uuid, \"security_group\": sg_uuid})\n # grant_resp = api.get(payload=payload, timeout=10)\n grant_resp = api.get(payload=payload)\n\n if grant_resp.get(\"code\") != 0:\n logger.debug(\"the resp of granting the new securty group is:\" +\n str(grant_resp))\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = grant_resp.get(\"msg\")\n logger.error(\n \"security_group with sg_id \" + sg_id +\n \" cannot apply to rds with rds_id \" + rds_id)\n else:\n try:\n rds_record.sg = RdsSecurityGroupModel.\\\n get_security_by_id(sg_id)\n rds_record.save()\n except Exception as exp:\n logger.error(\"cannot save grant sg to rds to db, {}\".\n format(exp.message))\n else:\n sg_results_succ.append(sg_id)\n result_data.update({rds_id: sg_results_succ})\n resp = console_response(code, msg, len(result_data.keys()), [result_data])\n return resp", "def test_rollback(self):\n os.system('rm config.txt; touch config.txt')\n test_oplog, primary_conn, mongos, solr = self.get_new_oplog()\n\n if not start_cluster():\n self.fail('Cluster could not be started successfully!')\n\n solr = DocManager()\n test_oplog.doc_manager = solr\n solr._delete() # equivalent to solr.delete(q='*: *')\n\n mongos['test']['test'].remove({})\n mongos['test']['test'].insert( \n {'_id': ObjectId('4ff74db3f646462b38000001'),\n 'name': 'paulie'},\n safe=True\n )\n while (mongos['test']['test'].find().count() != 1):\n time.sleep(1)\n cutoff_ts = test_oplog.get_last_oplog_timestamp()\n\n first_doc = {'name': 'paulie', '_ts': bson_ts_to_long(cutoff_ts),\n 'ns': 'test.test',\n '_id': ObjectId('4ff74db3f646462b38000001')}\n\n #try kill one, try restarting\n kill_mongo_proc(primary_conn.host, PORTS_ONE['PRIMARY'])\n\n new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))\n admin = new_primary_conn['admin']\n while admin.command(\"isMaster\")['ismaster'] is False:\n time.sleep(1)\n time.sleep(5)\n count = 0\n while True:\n try:\n mongos['test']['test'].insert({\n '_id': ObjectId('4ff74db3f646462b38000002'),\n 'name': 'paul'}, \n safe=True)\n break\n except OperationFailure:\n count += 1\n if count > 60:\n self.fail('Call to insert doc failed too many times')\n time.sleep(1)\n continue\n while (mongos['test']['test'].find().count() != 2):\n time.sleep(1)\n kill_mongo_proc(primary_conn.host, PORTS_ONE['SECONDARY'])\n start_mongo_proc(PORTS_ONE['PRIMARY'], \"demo-repl\", \"/replset1a\",\n \"/replset1a.log\", None)\n\n #wait for master to be established\n while 
primary_conn['admin'].command(\"isMaster\")['ismaster'] is False:\n time.sleep(1)\n\n start_mongo_proc(PORTS_ONE['SECONDARY'], \"demo-repl\", \"/replset1b\",\n \"/replset1b.log\", None)\n\n #wait for secondary to be established\n admin = new_primary_conn['admin']\n while admin.command(\"replSetGetStatus\")['myState'] != 2:\n time.sleep(1)\n while retry_until_ok(mongos['test']['test'].find().count) != 1:\n time.sleep(1)\n\n self.assertEqual(str(new_primary_conn.port), PORTS_ONE['SECONDARY'])\n self.assertEqual(str(primary_conn.port), PORTS_ONE['PRIMARY'])\n\n last_ts = test_oplog.get_last_oplog_timestamp()\n second_doc = {'name': 'paul', '_ts': bson_ts_to_long(last_ts),\n 'ns': 'test.test', \n '_id': ObjectId('4ff74db3f646462b38000002')}\n\n test_oplog.doc_manager.upsert(first_doc)\n test_oplog.doc_manager.upsert(second_doc)\n\n test_oplog.rollback()\n test_oplog.doc_manager.commit()\n results = solr._search()\n\n assert(len(results) == 1)\n\n self.assertEqual(results[0]['name'], 'paulie')\n self.assertTrue(results[0]['_ts'] <= bson_ts_to_long(cutoff_ts))\n\n #test_oplog.join()", "def delete_replicas(self, target_count):\n while len(self.replicas) > target_count:\n self.remove_replica(self.replicas[-1])", "def _report_replica_set_state(self, state, clean_server_name, replset_name, agentConfig):\n last_state = self._last_state_by_server.get(clean_server_name, -1)\n self._last_state_by_server[clean_server_name] = state\n if last_state != state and last_state != -1:\n return self.create_event(last_state, state, clean_server_name, replset_name, agentConfig)", "def test_list_cluster_policy(self):\n pass", "def enable_replicate(self, req, id, body):\n LOG.info(_LI(\"Enable volume's replicate, volume_id: %s\"), id)\n context = req.environ['sgservice.context']\n volume = self.service_api.get(context, id)\n replicate_info = self.service_api.enable_replicate(context, volume)\n return self._view_builder.action_summary(req, replicate_info)", "def cluster_replicas(\n self, node_id: str, target_nodes: Optional[\"TargetNodesT\"] = None\n ) -> ResponseT:\n return self.execute_command(\n \"CLUSTER REPLICAS\", node_id, target_nodes=target_nodes\n )", "def test_snat_with_nodes_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n for node in self.inputs.k8s_slave_ips:\n self.inputs.reboot(node)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def replica_names(self) -> Sequence[str]:\n return pulumi.get(self, \"replica_names\")", "def test_cbrestoremgr_should_not_change_replica_count_in_restore_bucket(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=10000)\n if not self.new_replicas:\n self.fail(\"This test needs to pass param 'new-replicas' to run\")\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.log.info(\"Start backup cluster\")\n self.backup_cluster_validate()\n self.backup_restore_validate()\n\n self.log.info(\"replicas from backup bucket: {0}\".format(self.num_replicas))\n self.log.info(\"replica in restore bucket should be {0} after restore\"\\\n .format(self.new_replicas))\n rest_r = RestConnection(self.backupset.restore_cluster_host)\n for bucket in self.buckets:\n bucket_stats = 
rest_r.get_bucket_json(bucket.name)\n if self.new_replicas != bucket_stats[\"replicaNumber\"]:\n self.fail(\"replia number in bucket {0} did change after restore\"\\\n .format(bucket.name))\n self.log.info(\"Verified replica in bucket {0}: {1}\"\\\n .format(bucket.name,\n bucket_stats[\"replicaNumber\"]))", "def become_replica(ticket, initiated_by):\n assert model.is_standalone()\n\n # On dev appserver emulate X-Appengine-Inbound-Appid header.\n headers = {'Content-Type': 'application/octet-stream'}\n protocol = 'https'\n if utils.is_local_dev_server():\n headers['X-Appengine-Inbound-Appid'] = app_identity.get_application_id()\n protocol = 'http'\n headers['X-URLFetch-Service-Id'] = utils.get_urlfetch_service_id()\n\n # Pass back the ticket for primary to verify it, tell the primary to use\n # default version hostname to talk to us.\n link_request = replication_pb2.ServiceLinkRequest()\n link_request.ticket = ticket.ticket\n link_request.replica_url = (\n '%s://%s' % (protocol, app_identity.get_default_version_hostname()))\n link_request.initiated_by = initiated_by.to_bytes()\n\n # Primary will look at X-Appengine-Inbound-Appid and compare it to what's in\n # the ticket.\n try:\n result = urlfetch.fetch(\n url='%s/auth_service/api/v1/internal/link_replica' % ticket.primary_url,\n payload=link_request.SerializeToString(),\n method='POST',\n headers=headers,\n follow_redirects=False,\n deadline=30,\n validate_certificate=True)\n except urlfetch.Error as exc:\n raise ProtocolError(\n replication_pb2.ServiceLinkResponse.TRANSPORT_ERROR,\n 'URLFetch error (%s): %s' % (exc.__class__.__name__, exc))\n\n # Protobuf based protocol is not using HTTP codes (handler always replies with\n # HTTP 200, providing error details if needed in protobuf serialized body).\n # So any other status code here means there was a transport level error.\n if result.status_code != 200:\n raise ProtocolError(\n replication_pb2.ServiceLinkResponse.TRANSPORT_ERROR,\n 'Request to the primary failed with HTTP %d.' % result.status_code)\n\n link_response = replication_pb2.ServiceLinkResponse.FromString(result.content)\n if link_response.status != replication_pb2.ServiceLinkResponse.SUCCESS:\n message = LINKING_ERRORS.get(\n link_response.status,\n 'Request to the primary failed with status %d.' % link_response.status)\n raise ProtocolError(link_response.status, message)\n\n # Become replica. Auth DB will be overwritten on a first push from Primary.\n state = model.AuthReplicationState(\n key=model.replication_state_key(),\n primary_id=ticket.primary_id,\n primary_url=ticket.primary_url)\n state.put()", "def test_read_cluster_resource_quota(self):\n pass", "def mmo_replica_state(self, mmo_connection):\n\n # https://docs.mongodb.org/manual/reference/replica-states/\n replica_states = [\n { \"id\": 0, \"name\": \"STARTUP\", \"description\": \"Not yet an active member of any set. All members start up in this state. The mongod parses the replica set configuration document while in STARTUP.\" },\n { \"id\": 1, \"name\": \"PRIMARY\", \"description\": \"The member in state primary is the only member that can accept write operations.\" },\n { \"id\": 2, \"name\": \"SECONDARY\", \"description\": \"A member in state secondary is replicating the data store. Data is available for reads, although they may be stale.\" },\n { \"id\": 3, \"name\": \"RECOVERING\", \"description\": \"Can vote. 
Members either perform startup self-checks, or transition from completing a rollback or resync.\" },\n { \"id\": 5, \"name\": \"STARTUP2\", \"description\": \"The member has joined the set and is running an initial sync.\" },\n { \"id\": 6, \"name\": \"UNKNOWN\", \"description\": \"The member's state, as seen from another member of the set, is not yet known.\" },\n { \"id\": 7, \"name\": \"ARBITER\", \"description\": \"Arbiters do not replicate data and exist solely to participate in elections.\" },\n { \"id\": 8, \"name\": \"DOWN\", \"description\": \"The member, as seen from another member of the set, is unreachable.\" },\n { \"id\": 9, \"name\": \"ROLLBACK\", \"description\": \"This member is actively performing a rollback. Data is not available for reads.\" },\n { \"id\": 10, \"name\": \"REMOVED\", \"description\": \"This member was once in a replica set but was subsequently removed.\" }\n ]\n\n if self.mmo_is_mongod(mmo_connection):\n return replica_states[mmo_connection[\"admin\"].command(\"replSetGetStatus\")[\"myState\"]]\n else:\n raise Exception(\"Not a mongod process\")", "def replicas(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"replicas\")", "def sstable_repairedset_test(self):\n cluster = self.cluster\n cluster.set_configuration_options(values={'hinted_handoff_enabled': False})\n cluster.populate(2).start()\n node1, node2 = cluster.nodelist()\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=2)', 'compaction(strategy=SizeTieredCompactionStrategy,enabled=false)', '-rate', 'threads=50'])\n\n node1.flush()\n node2.flush()\n\n node2.stop(gently=False)\n\n node2.run_sstablerepairedset(keyspace='keyspace1')\n node2.start(wait_for_binary_proto=True)\n\n initialOut1 = node1.run_sstablemetadata(keyspace='keyspace1').stdout\n initialOut2 = node2.run_sstablemetadata(keyspace='keyspace1').stdout\n\n matches = findall('(?<=Repaired at:).*', '\\n'.join([initialOut1, initialOut2]))\n debug(\"Repair timestamps are: {}\".format(matches))\n\n uniquematches = set(matches)\n matchcount = Counter(matches)\n\n self.assertGreaterEqual(len(uniquematches), 2, uniquematches)\n\n self.assertGreaterEqual(max(matchcount), 1, matchcount)\n\n self.assertIn('Repaired at: 0', '\\n'.join([initialOut1, initialOut2]))\n\n node1.stop()\n node2.stress(['write', 'n=15K', 'no-warmup', '-schema', 'replication(factor=2)'])\n node2.flush()\n node1.start(wait_for_binary_proto=True)\n\n if cluster.version() >= \"2.2\":\n node1.repair()\n else:\n node1.nodetool(\"repair -par -inc\")\n\n finalOut1 = node1.run_sstablemetadata(keyspace='keyspace1').stdout\n finalOut2 = node2.run_sstablemetadata(keyspace='keyspace1').stdout\n\n matches = findall('(?<=Repaired at:).*', '\\n'.join([finalOut1, finalOut2]))\n\n debug(matches)\n\n uniquematches = set(matches)\n matchcount = Counter(matches)\n\n self.assertGreaterEqual(len(uniquematches), 2)\n\n self.assertGreaterEqual(max(matchcount), 2)\n\n self.assertNotIn('Repaired at: 0', '\\n'.join([finalOut1, finalOut2]))", "def start_wsrep_new_cluster(self):\r\n return execute(self._start_wsrep_new_cluster)", "def test_snat_with_docker_restart_on_master(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"docker\",\n host_ips = [self.inputs.k8s_master_ip])\n time.sleep(30)\n cluster_status, 
error_nodes = ContrailStatusChecker(self.inputs).wait_till_contrail_cluster_stable()\n assert cluster_status, 'All nodes and services not up. Failure nodes are: %s' % (\n error_nodes)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def resource_type(self):\n return 'cluster'", "def test_update_nas_share_by_pool(self):\n pass", "def auto_config(self, num_replicas=1):\n _ = num_replicas\n return {}", "def failover_host(self, context, volumes, secondary_id=None, groups=None):\n volume_updates = []\n back_end_ip = None\n svc_host = volume_utils.extract_host(self.host, 'backend')\n service = objects.Service.get_by_args(context, svc_host,\n 'cinder-volume')\n\n if secondary_id and secondary_id != self.replica.backend_id:\n LOG.error(\"Kaminario driver received failover_host \"\n \"request, But backend is non replicated device\")\n raise exception.UnableToFailOver(reason=_(\"Failover requested \"\n \"on non replicated \"\n \"backend.\"))\n\n if (service.active_backend_id and\n service.active_backend_id != self.configuration.san_ip):\n self.snap_updates = []\n rep_volumes = []\n # update status for non-replicated primary volumes\n for v in volumes:\n vol_name = self.get_volume_name(v['id'])\n vol = self.client.search(\"volumes\", name=vol_name)\n if v.replication_status != K2_REP_FAILED_OVER and vol.total:\n status = 'available'\n if v.volume_attachment:\n map_rs = self.client.search(\"mappings\",\n volume=vol.hits[0])\n status = 'in-use'\n if map_rs.total:\n map_rs.hits[0].delete()\n volume_updates.append({'volume_id': v['id'],\n 'updates':\n {'status': status}})\n else:\n rep_volumes.append(v)\n\n # In-sync from secondaray array to primary array\n for v in rep_volumes:\n vol_name = self.get_volume_name(v['id'])\n vol = self.client.search(\"volumes\", name=vol_name)\n rvol_name = self.get_rep_name(vol_name)\n rvol = self.target.search(\"volumes\", name=rvol_name)\n session_name = self.get_session_name(v['id'])\n rsession_name = self.get_rep_name(session_name)\n ssn = self.target.search(\"replication/sessions\",\n name=rsession_name)\n if ssn.total:\n tgt_ssn = ssn.hits[0]\n ssn = self.client.search(\"replication/sessions\",\n name=session_name)\n if ssn.total:\n src_ssn = ssn.hits[0]\n\n if (tgt_ssn.state == 'failed_over' and\n tgt_ssn.current_role == 'target' and vol.total and src_ssn):\n map_rs = self.client.search(\"mappings\", volume=vol.hits[0])\n if map_rs.total:\n map_rs.hits[0].delete()\n tgt_ssn.state = 'in_sync'\n tgt_ssn.save()\n self._check_for_status(src_ssn, 'in_sync')\n if (rvol.total and src_ssn.state == 'in_sync' and\n src_ssn.current_role == 'target'):\n gen_no = self._create_volume_replica_user_snap(self.target,\n tgt_ssn)\n self.snap_updates.append({'tgt_ssn': tgt_ssn,\n 'gno': gen_no,\n 'stime': time.time()})\n LOG.debug(\"The target session: %s state is \"\n \"changed to in sync\", rsession_name)\n\n self._is_user_snap_sync_finished()\n\n # Delete secondary volume mappings and create snapshot\n for v in rep_volumes:\n vol_name = self.get_volume_name(v['id'])\n vol = self.client.search(\"volumes\", name=vol_name)\n rvol_name = self.get_rep_name(vol_name)\n rvol = self.target.search(\"volumes\", name=rvol_name)\n session_name = self.get_session_name(v['id'])\n rsession_name = self.get_rep_name(session_name)\n ssn = self.target.search(\"replication/sessions\",\n name=rsession_name)\n if ssn.total:\n tgt_ssn = ssn.hits[0]\n ssn = self.client.search(\"replication/sessions\",\n name=session_name)\n if ssn.total:\n 
src_ssn = ssn.hits[0]\n if (rvol.total and src_ssn.state == 'in_sync' and\n src_ssn.current_role == 'target'):\n map_rs = self.target.search(\"mappings\",\n volume=rvol.hits[0])\n if map_rs.total:\n map_rs.hits[0].delete()\n gen_no = self._create_volume_replica_user_snap(self.target,\n tgt_ssn)\n self.snap_updates.append({'tgt_ssn': tgt_ssn,\n 'gno': gen_no,\n 'stime': time.time()})\n self._is_user_snap_sync_finished()\n # changing source sessions to failed-over\n for v in rep_volumes:\n vol_name = self.get_volume_name(v['id'])\n vol = self.client.search(\"volumes\", name=vol_name)\n rvol_name = self.get_rep_name(vol_name)\n rvol = self.target.search(\"volumes\", name=rvol_name)\n session_name = self.get_session_name(v['id'])\n rsession_name = self.get_rep_name(session_name)\n ssn = self.target.search(\"replication/sessions\",\n name=rsession_name)\n if ssn.total:\n tgt_ssn = ssn.hits[0]\n ssn = self.client.search(\"replication/sessions\",\n name=session_name)\n if ssn.total:\n src_ssn = ssn.hits[0]\n if (rvol.total and src_ssn.state == 'in_sync' and\n src_ssn.current_role == 'target'):\n src_ssn.state = 'failed_over'\n src_ssn.save()\n self._check_for_status(tgt_ssn, 'suspended')\n LOG.debug(\"The target session: %s state is \"\n \"changed to failed over\", session_name)\n\n src_ssn.state = 'in_sync'\n src_ssn.save()\n LOG.debug(\"The target session: %s state is \"\n \"changed to in sync\", session_name)\n rep_status = fields.ReplicationStatus.DISABLED\n volume_updates.append({'volume_id': v['id'],\n 'updates':\n {'replication_status': rep_status}})\n\n back_end_ip = self.configuration.san_ip\n else:\n \"\"\"Failover to replication target.\"\"\"\n for v in volumes:\n vol_name = self.get_volume_name(v['id'])\n rv = self.get_rep_name(vol_name)\n if self.target.search(\"volumes\", name=rv).total:\n self._failover_volume(v)\n volume_updates.append(\n {'volume_id': v['id'],\n 'updates':\n {'replication_status': K2_REP_FAILED_OVER}})\n else:\n volume_updates.append({'volume_id': v['id'],\n 'updates': {'status': 'error', }})\n back_end_ip = self.replica.backend_id\n return back_end_ip, volume_updates, []", "def cluster_assign(images_lists, dataset):\n assert images_lists is not None\n pseudolabels = []\n image_indexes = []\n for cluster, images in enumerate(images_lists):\n image_indexes.extend(images)\n pseudolabels.extend([cluster] * len(images))\n print(image_indexes)\n print(pseudolabels)\n \n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n t = transforms.Compose([transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize])\n\n return ReassignedDataset(image_indexes, pseudolabels, dataset, t)", "def test_read_namespaced_applied_cluster_resource_quota(self):\n pass", "def rds_resource(session):\n #print type(session)\n rds = session.resource('rds')", "def test_snat_with_kubelet_restart_on_master(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"kubelet\",\n host_ips = [self.inputs.k8s_master_ip])\n time.sleep(30) # Wait timer for all kubernetes pods to stablise.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def _setup_cluster(self):\n raise NotImplementedError('Must be implemented in 
subclasses.')", "def _check_all_replicas_connected(num_replicas, gateway_port, protocol):\n exec_ids = set()\n exec_id_list = []\n for i in range(num_replicas + 1):\n id_ = _send_request(gateway_port, protocol, request_size=2)[0].text\n exec_ids.add(id_)\n exec_id_list.append(id_)\n print(exec_id_list)\n assert len(exec_ids) == num_replicas", "def test_patch_hyperflex_cluster_storage_policy(self):\n pass", "def multi_dc_replace_with_rf1_test(self):\n cluster = self.cluster\n cluster.populate([1, 1])\n cluster.start()\n node1, node2 = cluster.nodelist()\n\n node1 = cluster.nodes['node1']\n yaml_config = \"\"\"\n # Create the keyspace and table\n keyspace: keyspace1\n keyspace_definition: |\n CREATE KEYSPACE keyspace1 WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 1, 'dc2': 1};\n table: users\n table_definition:\n CREATE TABLE users (\n username text,\n first_name text,\n last_name text,\n email text,\n PRIMARY KEY(username)\n ) WITH compaction = {'class':'SizeTieredCompactionStrategy'};\n insert:\n partitions: fixed(1)\n batchtype: UNLOGGED\n queries:\n read:\n cql: select * from users where username = ?\n fields: samerow\n \"\"\"\n with tempfile.NamedTemporaryFile(mode='w+') as stress_config:\n stress_config.write(yaml_config)\n stress_config.flush()\n node1.stress(['user', 'profile=' + stress_config.name, 'n=10k', 'no-warmup',\n 'ops(insert=1)', '-rate', 'threads=50'])\n\n session = self.patient_cql_connection(node1)\n\n # change system_auth keyspace to 2 (default is 1) to avoid\n # \"Unable to find sufficient sources for streaming\" warning\n if cluster.cassandra_version() >= '2.2.0':\n session.execute(\"\"\"\n ALTER KEYSPACE system_auth\n WITH replication = {'class':'SimpleStrategy', 'replication_factor':2};\n \"\"\")\n\n # Save initial data\n stress_table = 'keyspace1.users'\n query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.TWO)\n initial_data = rows_to_list(session.execute(query))\n\n # stop node to replace\n debug(\"Stopping node to replace.\")\n node2.stop(wait_other_notice=True)\n\n node3 = new_node(cluster, data_center='dc2')\n node3.start(replace_address='127.0.0.2', wait_for_binary_proto=True)\n\n assert_bootstrap_state(self, node3, 'COMPLETED')\n\n # Check that keyspace was replicated from dc1 to dc2\n self.assertFalse(node3.grep_log(\"Unable to find sufficient sources for streaming range\"))\n\n # query should work again with node1 stopped\n node1.stop(wait_other_notice=True)\n debug(\"Verifying data on new node.\")\n session = self.patient_exclusive_cql_connection(node3)\n assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table),\n expected=initial_data,\n cl=ConsistencyLevel.LOCAL_ONE)" ]
[ "0.61823773", "0.575543", "0.571204", "0.5642952", "0.5626418", "0.55562645", "0.54538274", "0.54520047", "0.53823787", "0.5368559", "0.5275368", "0.52671695", "0.5166633", "0.51095724", "0.5102524", "0.5101288", "0.5083015", "0.50822884", "0.5079012", "0.50754446", "0.50624937", "0.50535625", "0.50457174", "0.5012547", "0.49861932", "0.49836007", "0.4980628", "0.49732393", "0.49630383", "0.49580663", "0.49437585", "0.4932733", "0.49284926", "0.49271256", "0.49066064", "0.49021676", "0.48982757", "0.48818594", "0.48706892", "0.4863104", "0.48630345", "0.48610312", "0.4831055", "0.48284152", "0.48142338", "0.48142338", "0.48142338", "0.48142338", "0.48142338", "0.4809317", "0.48016796", "0.48001873", "0.47897038", "0.47825503", "0.477279", "0.47698236", "0.47667995", "0.47607335", "0.47593898", "0.47585157", "0.4755059", "0.47504023", "0.47455022", "0.4744092", "0.4724734", "0.47227192", "0.47184616", "0.47111994", "0.4698751", "0.4698751", "0.46967766", "0.46839392", "0.4682342", "0.4682165", "0.4674512", "0.4669496", "0.46675912", "0.4664118", "0.46616518", "0.46535844", "0.46427697", "0.4642461", "0.46321437", "0.46261752", "0.46260664", "0.4623582", "0.4623437", "0.46199685", "0.4618473", "0.4613912", "0.46108255", "0.4605125", "0.45983157", "0.45973182", "0.45951763", "0.45903495", "0.4587293", "0.45825937", "0.45741287", "0.45738307", "0.4572543" ]
0.0
-1
This operation is applicable only to replica set instances and sharded cluster instances.
async def modify_dbinstance_network_type_with_options_async( self, request: dds_20151201_models.ModifyDBInstanceNetworkTypeRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.ModifyDBInstanceNetworkTypeResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.classic_expired_days): query['ClassicExpiredDays'] = request.classic_expired_days if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.network_type): query['NetworkType'] = request.network_type if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.retain_classic): query['RetainClassic'] = request.retain_classic if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token if not UtilClient.is_unset(request.v_switch_id): query['VSwitchId'] = request.v_switch_id if not UtilClient.is_unset(request.vpc_id): query['VpcId'] = request.vpc_id if not UtilClient.is_unset(request.zone_id): query['ZoneId'] = request.zone_id req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='ModifyDBInstanceNetworkType', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.ModifyDBInstanceNetworkTypeResponse(), await self.call_api_async(params, req, runtime) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def replicaof(self, *args, **kwargs) -> NoReturn:\n raise RedisClusterException(\"REPLICAOF is not supported in cluster mode\")", "def modifyMastersWithMultipleInstances(self):\n # Nothing to do\n pass", "def cluster(self):\n assert False", "def replica_set_name(self):\n ...", "def test_redis_increase_replica_count_usual_case():", "def test_patch_cluster_role(self):\n pass", "def mmo_execute_on_secondary_or_primary(self, mmo_connection, command, replicaset=\"all\", first_available_only=False): # TODO add execution database?\n cluster_command_output = []\n replsets_completed = set()\n all_replsets = set()\n repl_hosts = self.mmo_shard_servers(mmo_connection)\n for doc in repl_hosts:\n hostname, port, shard = doc[\"hostname\"], doc[\"port\"], doc[\"shard\"]\n all_replsets.add(shard)\n auth_dic = self.mmo_get_auth_details_from_connection(mmo_connection)\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"],\n auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"SECONDARY\" \\\n and (replicaset == \"all\" or replicaset == shard) \\\n and shard not in replsets_completed:\n secondary_found = True\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append(\n {\"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": command_output})\n #if first_available_only:\n replsets_completed.add(shard)\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": shard,\n \"command_output\": {\"Error\": \"This mongod process is not available\"}})\n else:\n raise excep\n # This is the case where there are no secondaries\n for missing_shard in (all_replsets^replsets_completed):\n for doc in repl_hosts:\n hostname, port, shard = doc[\"hostname\"], doc[\"port\"], doc[\"shard\"]\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"],\n auth_dic[\"authentication_database\"])\n if shard == missing_shard and self.mmo_replica_state(c)[\"name\"] == \"PRIMARY\":\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": command_output})\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": shard,\n \"command_output\": {\"Error\": \"This mongod process is not available\"}})\n else:\n raise excep\n return cluster_command_output", "def is_replica(self):\n if (\n self.replica\n and self.aip\n and not self.deleted\n and not self.sip\n and not self.dip\n ):\n return True\n return False", "def test_replace_cluster_role(self):\n pass", "def test_base_replica_repair_with_contention(self):\n self._base_replica_repair_test(fail_mv_lock=True)", "def mmo_execute_on_secondaries(self, mmo_connection, command, replicaset=\"all\", first_available_only=False): # TODO add execution database?\n cluster_command_output = []\n replsets_completed = []\n\n auth_dic = self.mmo_get_auth_details_from_connection(mmo_connection)\n\n if replicaset == self.config_server_repl_name: # Config server replset\n for doc in self.mmo_config_servers(mmo_connection):\n hostname, port = doc[\"hostname\"], doc[\"port\"]\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"],auth_dic[\"authentication_database\"])\n if 
self.mmo_replica_state(c)[\"name\"] == \"SECONDARY\":\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": command_output})\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": {\"Error\": \"mongod process is not up\"}})\n else:\n raise excep\n else:\n for doc in self.mmo_shard_servers(mmo_connection):\n hostname, port, shard = doc[\"hostname\"], doc[\"port\"], doc[\"shard\"]\n try:\n\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"], auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"SECONDARY\" \\\n and (replicaset == \"all\" or replicaset == shard)\\\n and shard not in replsets_completed:\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({ \"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": command_output })\n if first_available_only:\n replsets_completed.append(shard)\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": {\"Error\": \"This mongod process is not available\"}})\n else:\n raise excep\n return cluster_command_output", "def mmo_execute_on_primaries(self, mmo_connection, command, replicaset=\"all\"): # TODO add execution database?\n\n auth_dic = self.mmo_get_auth_details_from_connection(mmo_connection)\n\n cluster_command_output = []\n if replicaset == self.config_server_repl_name: # Config server replset\n for doc in self.mmo_config_servers(mmo_connection):\n hostname, port = doc[\"hostname\"], doc[\"port\"]\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"],auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"PRIMARY\":\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": command_output})\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": {\"Error\": \"mongod process is not up\"}})\n else:\n raise excep\n else:\n for doc in self.mmo_shard_servers(mmo_connection):\n hostname, port, shard = doc[\"hostname\"], doc[\"port\"], doc[\"shard\"]\n\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"], auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"PRIMARY\" and (replicaset == \"all\" or replicaset == shard):\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({ \"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": command_output })\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({ \"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": { \"Error\": \"mongod process is not up\" } })\n else:\n raise excep\n return cluster_command_output", "def prepare_replica_for_exchange(self, replica):\n pass", "def crash_safe_replication(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"crash_safe_replication\")", "def 
test_read_cluster_role(self):\n pass", "def test_replace_cluster_policy(self):\n pass", "def test_patch_cluster_policy(self):\n pass", "def dvs_instances_batch_mix_sg(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n self.show_step(2)\n net_1 = os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network is created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(3)\n self.show_step(4)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n self.icmp[\"security_group_rule\"][\"security_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"remote_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n self.icmp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n\n for sg in [sg1, sg2]:\n self.tcp[\"security_group_rule\"][\"security_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"remote_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n self.tcp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n\n # Add rules for ssh and ping\n os_conn.goodbye_security()\n _s_groups = os_conn.neutron.list_security_groups()['security_groups']\n _srv_tenant_id = os_conn.get_tenant(SERVTEST_TENANT).id\n default_sg = [sg for sg in _s_groups\n if sg['tenant_id'] == _srv_tenant_id and\n sg['name'] == 'default'][0]\n\n for step in (5, 9):\n self.show_step(step) # step 5, 9\n step += 1\n self.show_step(step) # step 6, 10\n step += 1\n\n openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=vm_count,\n security_groups=[sg1.name, sg2.name, default_sg['name']])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(step) # step 7, 11\n step += 1\n\n instances = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, instances)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair,\n timeout=60 * 5,\n interval=10)\n\n self.show_step(step) # step 8, 12\n step += 1\n\n for instance in instances:\n os_conn.nova.servers.delete(instance)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(instance)", "def upsert(self):\n\n if 
self.cluster:\n self.cluster.upsert()\n else:\n super().upsert()", "def delete_cluster(self):", "def use_read_replica_if_available(queryset):\r\n return queryset.using(\"read_replica\") if \"read_replica\" in settings.DATABASES else queryset", "def failover_replica(self) -> 'outputs.InstanceFailoverReplicaResponse':\n return pulumi.get(self, \"failover_replica\")", "def test_patch_hyperflex_cluster(self):\n pass", "def replica_catalog(train_tweets_path, val_tweets_path, test_tweets_path, dataset_images, EMBEDDING_BASE_PATH):\n rc = ReplicaCatalog()\n http_location = \"https://workflow.isi.edu/Panorama/Data/CrisisComputing\"\n\n # list of input file objects\n input_images = []\n\n # Adding Images to the replica catalogue\n for image_path in dataset_images:\n name = image_path[1].split(\"/\")[-1]\n image_file = File(name)\n input_images.append(image_file)\n \n path_split = image_path[0].split(\"/\")\n #rc.add_replica(\"local\", image_file, image_path[0])\n rc.add_replica(\"isi\", image_file, os.path.join(http_location, path_split[-2], path_split[-1]))\n\n \n glove_embeddings = File('glove.twitter.27B.200d.txt')\n \n # File objects for train, val and test tweets csv\n train_tweets_name = File(train_tweets_path.split('/')[-1])\n val_tweets_name = File(val_tweets_path.split('/')[-1])\n test_tweets_name = File(test_tweets_path.split('/')[-1])\n \n #rc.add_replica(\"local\", train_tweets_name, train_tweets_path)\n #rc.add_replica(\"local\", val_tweets_name, val_tweets_path)\n #rc.add_replica(\"local\", test_tweets_name, test_tweets_path)\n #rc.add_replica(\"local\", glove_embeddings, os.path.join(os.getcwd(), os.path.join(EMBEDDING_BASE_PATH, GLOVE_EMBEDDING_FILE))) \n \n path_split = train_tweets_path.split('/')\n rc.add_replica(\"isi\", train_tweets_name, os.path.join(http_location, path_split[-2], path_split[-1]))\n path_split = val_tweets_path.split('/')\n rc.add_replica(\"isi\", val_tweets_name, os.path.join(http_location, path_split[-2], path_split[-1]))\n path_split = test_tweets_path.split('/')\n rc.add_replica(\"isi\", test_tweets_name, os.path.join(http_location, path_split[-2], path_split[-1]))\n \n rc.add_replica(\"isi\", glove_embeddings, os.path.join(http_location, \"glove_twitter\", GLOVE_EMBEDDING_FILE)) \n rc.write()\n\n return input_images, train_tweets_name, val_tweets_name, test_tweets_name, glove_embeddings", "def test_replace_cluster_resource_quota(self):\n pass", "def replicas(self, replicas):\n\n self._replicas = replicas", "def index(self):\n\n if self.cluster:\n self.cluster.index()\n else:\n super().index()", "def cluster_replicate(\n self, target_nodes: \"TargetNodesT\", node_id: str\n ) -> ResponseT:\n return self.execute_command(\n \"CLUSTER REPLICATE\", node_id, target_nodes=target_nodes\n )", "def replica(self) -> str:\n return pulumi.get(self, \"replica\")", "def share_replica_delete(context, share_replica_id, session=None,\n need_to_update_usages=True):\n session = session or get_session()\n\n share_instance_delete(context, share_replica_id, session=session,\n need_to_update_usages=need_to_update_usages)", "def test_snat_with_master_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def 
test_update_hyperflex_cluster(self):\n pass", "def replication(self):\n return self._replication", "def test_replace_cluster_resource_quota_status(self):\n pass", "def test_read_cluster_policy(self):\n pass", "def _mix_replicas(self):\n logger.debug(\"Mixing replicas (does nothing for MultiStateSampler)...\")\n\n # Reset storage to keep track of swap attempts this iteration.\n self._n_accepted_matrix[:, :] = 0\n self._n_proposed_matrix[:, :] = 0\n\n # Determine fraction of swaps accepted this iteration.\n n_swaps_proposed = self._n_proposed_matrix.sum()\n n_swaps_accepted = self._n_accepted_matrix.sum()\n swap_fraction_accepted = 0.0\n if n_swaps_proposed > 0:\n swap_fraction_accepted = n_swaps_accepted / n_swaps_proposed # Python 3 uses true division for /\n logger.debug(\"Accepted {}/{} attempted swaps ({:.1f}%)\".format(n_swaps_accepted, n_swaps_proposed,\n swap_fraction_accepted * 100.0))\n return self._replica_thermodynamic_states", "def test_patch_cluster_resource_quota(self):\n pass", "def is_replicated():\n if tf.distribute.has_strategy() and tf.distribute.get_replica_context():\n return tf.distribute.get_strategy().num_replicas_in_sync > 1\n return get_tf_replicator() is not None or is_tpu_replicated()", "def share_replicas_get_all(context, with_share_data=False,\n with_share_server=True, session=None):\n session = session or get_session()\n\n result = _share_replica_get_with_filters(\n context, with_share_server=with_share_server, session=session).all()\n\n if with_share_data:\n result = _set_instances_share_data(context, result, session)\n\n return result", "def crash_safe_replication_enabled(self) -> bool:\n return pulumi.get(self, \"crash_safe_replication_enabled\")", "def check_smartstack_replication_for_instance(\n instance_config,\n expected_count,\n smartstack_replication_checker,\n):\n\n crit_threshold = instance_config.get_replication_crit_percentage()\n\n log.info('Checking instance %s in smartstack', instance_config.job_id)\n smartstack_replication_info = \\\n smartstack_replication_checker.get_replication_for_instance(instance_config)\n\n log.debug('Got smartstack replication info for %s: %s' %\n (instance_config.job_id, smartstack_replication_info))\n\n if len(smartstack_replication_info) == 0:\n status = pysensu_yelp.Status.CRITICAL\n output = (\n 'Service %s has no Smartstack replication info. 
Make sure the discover key in your smartstack.yaml '\n 'is valid!\\n'\n ) % instance_config.job_id\n log.error(output)\n else:\n expected_count_per_location = int(expected_count / len(smartstack_replication_info))\n output = ''\n output_critical = ''\n output_ok = ''\n under_replication_per_location = []\n\n for location, available_backends in sorted(smartstack_replication_info.items()):\n num_available_in_location = available_backends.get(instance_config.job_id, 0)\n under_replicated, ratio = is_under_replicated(\n num_available_in_location, expected_count_per_location, crit_threshold,\n )\n if under_replicated:\n output_critical += '- Service %s has %d out of %d expected instances in %s (CRITICAL: %d%%)\\n' % (\n instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,\n )\n else:\n output_ok += '- Service %s has %d out of %d expected instances in %s (OK: %d%%)\\n' % (\n instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,\n )\n under_replication_per_location.append(under_replicated)\n\n output += output_critical\n if output_critical and output_ok:\n output += '\\n\\n'\n output += 'The following locations are OK:\\n'\n output += output_ok\n\n if any(under_replication_per_location):\n status = pysensu_yelp.Status.CRITICAL\n output += (\n \"\\n\\n\"\n \"What this alert means:\\n\"\n \"\\n\"\n \" This replication alert means that a SmartStack powered loadbalancer (haproxy)\\n\"\n \" doesn't have enough healthy backends. Not having enough healthy backends\\n\"\n \" means that clients of that service will get 503s (http) or connection refused\\n\"\n \" (tcp) when trying to connect to it.\\n\"\n \"\\n\"\n \"Reasons this might be happening:\\n\"\n \"\\n\"\n \" The service may simply not have enough copies or it could simply be\\n\"\n \" unhealthy in that location. There also may not be enough resources\\n\"\n \" in the cluster to support the requested instance count.\\n\"\n \"\\n\"\n \"Things you can do:\\n\"\n \"\\n\"\n \" * You can view the logs for the job with:\\n\"\n \" paasta logs -s %(service)s -i %(instance)s -c %(cluster)s\\n\"\n \"\\n\"\n \" * Fix the cause of the unhealthy service. 
Try running:\\n\"\n \"\\n\"\n \" paasta status -s %(service)s -i %(instance)s -c %(cluster)s -vv\\n\"\n \"\\n\"\n \" * Widen SmartStack discovery settings\\n\"\n \" * Increase the instance count\\n\"\n \"\\n\"\n ) % {\n 'service': instance_config.service,\n 'instance': instance_config.instance,\n 'cluster': instance_config.cluster,\n }\n log.error(output)\n else:\n status = pysensu_yelp.Status.OK\n log.info(output)\n send_event(instance_config=instance_config, status=status, output=output)", "def test_create_cluster_role(self):\n pass", "def share_replica_update(context, share_replica_id, values,\n with_share_data=False, session=None):\n session = session or get_session()\n\n with session.begin():\n _ensure_availability_zone_exists(context, values, session,\n strict=False)\n updated_share_replica = _share_instance_update(\n context, share_replica_id, values, session=session)\n\n if with_share_data:\n updated_share_replica = _set_instances_share_data(\n context, updated_share_replica, session)[0]\n\n return updated_share_replica", "def test_index_nas_shares_by_pool(self):\n pass", "def replicas(self) -> int:\n return pulumi.get(self, \"replicas\")", "def replicas(self) -> int:\n return pulumi.get(self, \"replicas\")", "def replicas(self) -> int:\n return pulumi.get(self, \"replicas\")", "def replicas(self) -> int:\n return pulumi.get(self, \"replicas\")", "def replicas(self) -> int:\n return pulumi.get(self, \"replicas\")", "def test_patch_cluster_resource_quota_status(self):\n pass", "def mmo_replicaset_conf(self, mmo_connection):\n command = {\"replSetGetConfig\" : 1}\n return self.mmo_execute_on_primaries(mmo_connection, command)", "def run(self, image, replicas, scale_replicas, command=None,\n status_wait=True):\n namespace = self.choose_namespace()\n\n name = self.client.create_statefulset(\n namespace=namespace,\n replicas=replicas,\n image=image,\n command=command,\n status_wait=status_wait\n )\n\n self.client.scale_statefulset(\n name,\n namespace=namespace,\n replicas=scale_replicas,\n status_wait=status_wait\n )\n\n self.client.scale_statefulset(\n name,\n namespace=namespace,\n replicas=replicas,\n status_wait=status_wait\n )\n\n self.client.delete_statefulset(\n name=name,\n namespace=namespace,\n status_wait=status_wait\n )", "def test_list_cluster_role(self):\n pass", "def failover_replica(self) -> pulumi.Output['outputs.InstanceFailoverReplicaResponse']:\n return pulumi.get(self, \"failover_replica\")", "def _base_replica_repair_test(self, fail_mv_lock=False):\n\n self.prepare(rf=3)\n node1, node2, node3 = self.cluster.nodelist()\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Write initial data')\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i))\n\n self._replay_batchlogs()\n\n logger.debug('Verify the data in the MV with CL=ALL')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0],\n cl=ConsistencyLevel.ALL\n )\n\n logger.debug('Shutdown node1')\n node1.stop(wait_other_notice=True)\n logger.debug('Delete node1 data')\n node1.clear(clear_all=True)\n\n jvm_args = []\n if fail_mv_lock:\n if 
self.cluster.version() >= LooseVersion('3.10'): # CASSANDRA-10134\n jvm_args = ['-Dcassandra.allow_unsafe_replace=true', '-Dcassandra.replace_address={}'.format(node1.address())]\n jvm_args.append(\"-Dcassandra.test.fail_mv_locks_count=1000\")\n # this should not make Keyspace.apply throw WTE on failure to acquire lock\n node1.set_configuration_options(values={'write_request_timeout_in_ms': 100})\n logger.debug('Restarting node1 with jvm_args={}'.format(jvm_args))\n node1.start(wait_for_binary_proto=True, jvm_args=jvm_args)\n logger.debug('Shutdown node2 and node3')\n node2.stop(wait_other_notice=True)\n node3.stop(wait_other_notice=True)\n\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n\n logger.debug('Verify that there is no data on node1')\n for i in range(1000):\n assert_none(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i)\n )\n\n logger.debug('Restarting node2 and node3')\n node2.start(wait_for_binary_proto=True)\n node3.start(wait_for_binary_proto=True)\n\n # Just repair the base replica\n logger.debug('Starting repair on node1')\n node1.nodetool(\"repair ks t\")\n\n logger.debug('Verify data with cl=ALL')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )", "def get_replica_ips(self):\n return self.membership", "def initialize_replicas(self):\n try:\n self.replicas+1\n except:\n print \"Ensemble MD Toolkit Error: Number of replicas must be \\\n defined for pattern ReplicaExchange!\"\n raise\n\n\n replicas = []\n N = self.replicas\n for k in range(N):\n r = ReplicaP(k)\n replicas.append(r)\n\n return replicas", "def run(self):\n client = self._get_client()\n metadata = self._metadata\n\n if str(metadata.get('cluster')) == str(self._cluster_id):\n if str(self._uuid) == str(self._node_id):\n #hypervisor_hostname = client.servers.find(\n # id=self._uuid).to_dict()['OS-EXT-SRV-ATTR:hypervisor_hostname']\n hypervisor_hostname = client.servers.find(\n id=self._uuid).to_dict()['OS-EXT-SRV-ATTR:host']\n data = {\n 'migrate': True,\n 'uuid': self._uuid,\n 'hypervisor_hostname': hypervisor_hostname,\n 'flavor_id': self._flavor\n }\n return Result(data)\n\n data = {\n 'migrate': False,\n 'uuid': self._uuid,\n 'hypervisor_hostname': '',\n 'flavor_id':self._flavor\n }\n return Result(data)", "def replace_with_insufficient_replicas_test(self):\n debug(\"Starting cluster with 3 nodes.\")\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n if DISABLE_VNODES:\n num_tokens = 1\n else:\n # a little hacky but grep_log returns the whole line...\n num_tokens = int(node3.get_conf_option('num_tokens'))\n\n debug(\"testing with num_tokens: {}\".format(num_tokens))\n\n debug(\"Inserting Data...\")\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=2)'])\n\n # stop node to replace\n debug(\"Stopping node to replace.\")\n node3.stop(wait_other_notice=True)\n\n # stop other replica\n debug(\"Stopping node2 (other replica)\")\n node2.stop(wait_other_notice=True)\n\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n cluster.add(node4, False)\n node4.start(replace_address='127.0.0.3', wait_for_binary_proto=False, wait_other_notice=False)\n\n # replace 
should fail due to insufficient replicas\n node4.watch_log_for(\"Unable to find sufficient sources for streaming range\")\n assert_not_running(node4)", "def dvs_instances_one_group(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(2)\n self.show_step(3)\n openstack.create_instances(os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n vm_count=vm_count,\n security_groups=[security_group.name])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(4)\n srv_list = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, srv_list)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair)\n\n self.show_step(5)\n for srv in srv_list:\n os_conn.nova.servers.delete(srv)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(srv)", "def scale_app(self, name, replicas):\n raise NotImplementedError", "def check_replica_primary(con,host, warning, critical,perf_data):\n if warning is None and critical is None:\n warning=1\n warning=warning or 2\n critical=critical or 2\n\n primary_status=0\n message=\"Primary server has not changed\"\n db=con[\"nagios\"]\n data=get_server_status(con)\n current_primary=data['repl'].get('primary')\n saved_primary=get_stored_primary_server_name(db)\n if current_primary is None:\n current_primary = \"None\"\n if saved_primary is None:\n saved_primary = \"None\"\n if current_primary != saved_primary:\n last_primary_server_record = {\"server\": current_primary}\n db.last_primary_server.update({\"_id\": \"last_primary\"}, {\"$set\" : last_primary_server_record} , upsert=True, safe=True)\n message = \"Primary server has changed from %s to %s\" % (saved_primary, current_primary)\n primary_status=1\n return check_levels(primary_status,warning,critical,message)", "def test_delete_collection_cluster_policy(self):\n pass", "def _load_cluster(self):", "def replica_configuration(self) -> 'outputs.ReplicaConfigurationResponse':\n return pulumi.get(self, \"replica_configuration\")", "def test_delete_cluster_role(self):\n pass", "def _standby_clone():\n # manualy:\n # $ mkdir -p /var/lib/postgresql/9.1/testscluster/\n # $ rsync -avz --rsh='ssh -p2222' root@12.34.56.789:/var/lib/postgresql/9.1/testscluster/ /var/lib/postgresql/9.1/testscluster/\n\n with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):\n puts(green('Start cloning the master'))\n repmgr_clone_command = 'repmgr -D %(slave_pgdata_path)s -d %(sync_db)s -p %(cluster_port)s -U %(sync_user)s -R postgres --verbose standby clone %(pgmaster_ip)s' % env\n puts(green(repmgr_clone_command))\n puts(\"-\" * 40)\n res = sudo(repmgr_clone_command, user='postgres')\n if 'Can not connect to the remote host' in res or 'Connection to database failed' in res:\n puts(\"-\" * 40)\n 
puts(green(repmgr_clone_command))\n puts(\"-\" * 40)\n puts(\"Master server is %s reachable.\" % red(\"NOT\"))\n puts(\"%s you can try to CLONE the slave manually [%s]:\" % (green(\"BUT\"), red(\"at your own risk\")))\n puts(\"On the slave server:\")\n puts(\"$ sudo -u postgres rsync -avz --rsh='ssh -p%(master_ssh_port)s' postgres@%(pgmaster_ip)s:%(master_pgdata_path)s %(slave_pgdata_path)s --exclude=pg_xlog* --exclude=pg_control --exclude=*.pid\" % env)\n puts(\"Here:\")\n puts(\"$ fab <cluster_task_name> finish_configuring_slave\")\n abort(\"STOP...\")", "def num_replicas_per_shard(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"num_replicas_per_shard\")", "def num_replicas_per_shard(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"num_replicas_per_shard\")", "def num_replicas_per_shard(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"num_replicas_per_shard\")", "def main():\n rs = redshift(config_file=CONFIG_FILENAME)\n \n # check if cluster already available\n try:\n clust_avail = check_available(rs)\n except rs_client.exceptions.ClusterNotFoundFault:\n clust_avail = False\n\n # if cluster not available, create it\n if not clust_avail:\n create_cluster(rs) \n \n print(f'Cluster is available. Cluster information: \\n{rs.get_cluster_info()}')", "def test_modify_storage_group_srdf_set_consistency_enable(self):\n if not self.run_consistency_enable_check():\n self.skipTest(\n 'Skip test_modify_storage_group_srdf_set_consistency_enable '\n 'This fix is in V9.2.1.7')\n sg_name, srdf_group_number, local_volume, remote_volume = (\n self.create_rdf_sg())\n self.replication.modify_storage_group_srdf(\n storage_group_id=sg_name, action='setmode',\n srdf_group_number=srdf_group_number,\n options={'setMode': {'mode': 'Asynchronous'}})\n status = self.replication.modify_storage_group_srdf(\n storage_group_id=sg_name, srdf_group_number=srdf_group_number,\n action=\"EnableConsistency\")\n self.assertEqual('Enabled', status.get('consistency_protection'))\n disable_status = self.replication.modify_storage_group_srdf(\n storage_group_id=sg_name, srdf_group_number=srdf_group_number,\n action=\"DisableConsistency\")\n self.assertEqual(\n 'Disabled', disable_status.get('consistency_protection'))", "def test_rollback(self):\n os.system('rm config.txt; touch config.txt')\n test_oplog, primary_conn, mongos, solr = self.get_new_oplog()\n\n if not start_cluster():\n self.fail('Cluster could not be started successfully!')\n\n solr = DocManager()\n test_oplog.doc_manager = solr\n solr._delete() # equivalent to solr.delete(q='*: *')\n\n mongos['test']['test'].remove({})\n mongos['test']['test'].insert( \n {'_id': ObjectId('4ff74db3f646462b38000001'),\n 'name': 'paulie'},\n safe=True\n )\n while (mongos['test']['test'].find().count() != 1):\n time.sleep(1)\n cutoff_ts = test_oplog.get_last_oplog_timestamp()\n\n first_doc = {'name': 'paulie', '_ts': bson_ts_to_long(cutoff_ts),\n 'ns': 'test.test',\n '_id': ObjectId('4ff74db3f646462b38000001')}\n\n #try kill one, try restarting\n kill_mongo_proc(primary_conn.host, PORTS_ONE['PRIMARY'])\n\n new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))\n admin = new_primary_conn['admin']\n while admin.command(\"isMaster\")['ismaster'] is False:\n time.sleep(1)\n time.sleep(5)\n count = 0\n while True:\n try:\n mongos['test']['test'].insert({\n '_id': ObjectId('4ff74db3f646462b38000002'),\n 'name': 'paul'}, \n safe=True)\n break\n except OperationFailure:\n count += 1\n if count > 60:\n self.fail('Call to insert 
doc failed too many times')\n time.sleep(1)\n continue\n while (mongos['test']['test'].find().count() != 2):\n time.sleep(1)\n kill_mongo_proc(primary_conn.host, PORTS_ONE['SECONDARY'])\n start_mongo_proc(PORTS_ONE['PRIMARY'], \"demo-repl\", \"/replset1a\",\n \"/replset1a.log\", None)\n\n #wait for master to be established\n while primary_conn['admin'].command(\"isMaster\")['ismaster'] is False:\n time.sleep(1)\n\n start_mongo_proc(PORTS_ONE['SECONDARY'], \"demo-repl\", \"/replset1b\",\n \"/replset1b.log\", None)\n\n #wait for secondary to be established\n admin = new_primary_conn['admin']\n while admin.command(\"replSetGetStatus\")['myState'] != 2:\n time.sleep(1)\n while retry_until_ok(mongos['test']['test'].find().count) != 1:\n time.sleep(1)\n\n self.assertEqual(str(new_primary_conn.port), PORTS_ONE['SECONDARY'])\n self.assertEqual(str(primary_conn.port), PORTS_ONE['PRIMARY'])\n\n last_ts = test_oplog.get_last_oplog_timestamp()\n second_doc = {'name': 'paul', '_ts': bson_ts_to_long(last_ts),\n 'ns': 'test.test', \n '_id': ObjectId('4ff74db3f646462b38000002')}\n\n test_oplog.doc_manager.upsert(first_doc)\n test_oplog.doc_manager.upsert(second_doc)\n\n test_oplog.rollback()\n test_oplog.doc_manager.commit()\n results = solr._search()\n\n assert(len(results) == 1)\n\n self.assertEqual(results[0]['name'], 'paulie')\n self.assertTrue(results[0]['_ts'] <= bson_ts_to_long(cutoff_ts))\n\n #test_oplog.join()", "def modify_rds_security_group(payload):\n # version = payload.get(\"version\")\n rds_ids = payload.pop(\"resource_id\")\n sg_ids = payload.pop(\"sg_id\")\n apply_action = \"GrantSecurityGroup\"\n remove_action = \"RemoveSecurityGroup\"\n check_instance_security_action = \"DescribeSecurityGroupByInstance\"\n version = payload.get(\"version\")\n result_data = {}\n\n succ, resp = get_ha_rds_backend_instance_info(payload)\n if not succ:\n return resp\n rds_2_instance = {rds: instance for instance, rds in resp.items()}\n\n if len(sg_ids) > 1:\n return console_response(\n SecurityErrorCode.ONE_SECURITY_PER_INSTANCE_ERROR, \"modify failed\")\n sg_id = sg_ids[0]\n\n code = 0\n msg = 'Success'\n for rds_id in rds_ids:\n sg_results_succ = []\n sg = RdsSecurityGroupModel.get_security_by_id(sg_id=sg_id)\n sg_uuid = sg.uuid\n visible_rds_record = RdsModel.get_rds_by_id(rds_id=rds_id)\n if visible_rds_record.rds_type == 'ha':\n rds_group = visible_rds_record.rds_group\n rds_records = RdsModel.get_rds_records_by_group(rds_group)\n else:\n rds_records = []\n for rds_record in rds_records:\n rds_ins_uuid = rds_2_instance.get(rds_record.uuid)\n\n payload.update(\n {\"action\": check_instance_security_action, \"version\": version,\n \"server\": rds_ins_uuid})\n # check_resp = api.get(payload=payload, timeout=10)\n check_resp = api.get(payload=payload)\n if check_resp.get(\"code\") != 0:\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = check_resp.get(\"msg\")\n continue\n\n # if the instance already has a security group, remove it\n if check_resp[\"data\"][\"total_count\"] > 0:\n old_sg_uuid = check_resp[\"data\"][\"ret_set\"][0][\"id\"]\n payload.update({\"action\": remove_action, \"version\": version,\n \"server\": rds_ins_uuid,\n \"security_group\": old_sg_uuid})\n # remove_resp = api.get(payload=payload, timeout=10)\n remove_resp = api.get(payload=payload)\n if remove_resp.get(\"code\") != 0:\n logger.debug(\"the resp of removing the old securty group is:\" +\n str(remove_resp))\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = remove_resp.get(\"msg\")\n continue\n else:\n rds_record.sg = None\n 
rds_record.save()\n\n # grant the new security group to the instance\n payload.update({\"action\": apply_action, \"version\": version,\n \"server\": rds_ins_uuid, \"security_group\": sg_uuid})\n # grant_resp = api.get(payload=payload, timeout=10)\n grant_resp = api.get(payload=payload)\n\n if grant_resp.get(\"code\") != 0:\n logger.debug(\"the resp of granting the new securty group is:\" +\n str(grant_resp))\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = grant_resp.get(\"msg\")\n logger.error(\n \"security_group with sg_id \" + sg_id +\n \" cannot apply to rds with rds_id \" + rds_id)\n else:\n try:\n rds_record.sg = RdsSecurityGroupModel.\\\n get_security_by_id(sg_id)\n rds_record.save()\n except Exception as exp:\n logger.error(\"cannot save grant sg to rds to db, {}\".\n format(exp.message))\n else:\n sg_results_succ.append(sg_id)\n result_data.update({rds_id: sg_results_succ})\n resp = console_response(code, msg, len(result_data.keys()), [result_data])\n return resp", "def delete_replicas(self, target_count):\n while len(self.replicas) > target_count:\n self.remove_replica(self.replicas[-1])", "def _report_replica_set_state(self, state, clean_server_name, replset_name, agentConfig):\n last_state = self._last_state_by_server.get(clean_server_name, -1)\n self._last_state_by_server[clean_server_name] = state\n if last_state != state and last_state != -1:\n return self.create_event(last_state, state, clean_server_name, replset_name, agentConfig)", "def test_list_cluster_policy(self):\n pass", "def enable_replicate(self, req, id, body):\n LOG.info(_LI(\"Enable volume's replicate, volume_id: %s\"), id)\n context = req.environ['sgservice.context']\n volume = self.service_api.get(context, id)\n replicate_info = self.service_api.enable_replicate(context, volume)\n return self._view_builder.action_summary(req, replicate_info)", "def cluster_replicas(\n self, node_id: str, target_nodes: Optional[\"TargetNodesT\"] = None\n ) -> ResponseT:\n return self.execute_command(\n \"CLUSTER REPLICAS\", node_id, target_nodes=target_nodes\n )", "def test_snat_with_nodes_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n for node in self.inputs.k8s_slave_ips:\n self.inputs.reboot(node)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def test_cbrestoremgr_should_not_change_replica_count_in_restore_bucket(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=10000)\n if not self.new_replicas:\n self.fail(\"This test needs to pass param 'new-replicas' to run\")\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.log.info(\"Start backup cluster\")\n self.backup_cluster_validate()\n self.backup_restore_validate()\n\n self.log.info(\"replicas from backup bucket: {0}\".format(self.num_replicas))\n self.log.info(\"replica in restore bucket should be {0} after restore\"\\\n .format(self.new_replicas))\n rest_r = RestConnection(self.backupset.restore_cluster_host)\n for bucket in self.buckets:\n bucket_stats = rest_r.get_bucket_json(bucket.name)\n if self.new_replicas != bucket_stats[\"replicaNumber\"]:\n self.fail(\"replia number in bucket {0} did change after restore\"\\\n .format(bucket.name))\n self.log.info(\"Verified replica in 
bucket {0}: {1}\"\\\n .format(bucket.name,\n bucket_stats[\"replicaNumber\"]))", "def replica_names(self) -> Sequence[str]:\n return pulumi.get(self, \"replica_names\")", "def become_replica(ticket, initiated_by):\n assert model.is_standalone()\n\n # On dev appserver emulate X-Appengine-Inbound-Appid header.\n headers = {'Content-Type': 'application/octet-stream'}\n protocol = 'https'\n if utils.is_local_dev_server():\n headers['X-Appengine-Inbound-Appid'] = app_identity.get_application_id()\n protocol = 'http'\n headers['X-URLFetch-Service-Id'] = utils.get_urlfetch_service_id()\n\n # Pass back the ticket for primary to verify it, tell the primary to use\n # default version hostname to talk to us.\n link_request = replication_pb2.ServiceLinkRequest()\n link_request.ticket = ticket.ticket\n link_request.replica_url = (\n '%s://%s' % (protocol, app_identity.get_default_version_hostname()))\n link_request.initiated_by = initiated_by.to_bytes()\n\n # Primary will look at X-Appengine-Inbound-Appid and compare it to what's in\n # the ticket.\n try:\n result = urlfetch.fetch(\n url='%s/auth_service/api/v1/internal/link_replica' % ticket.primary_url,\n payload=link_request.SerializeToString(),\n method='POST',\n headers=headers,\n follow_redirects=False,\n deadline=30,\n validate_certificate=True)\n except urlfetch.Error as exc:\n raise ProtocolError(\n replication_pb2.ServiceLinkResponse.TRANSPORT_ERROR,\n 'URLFetch error (%s): %s' % (exc.__class__.__name__, exc))\n\n # Protobuf based protocol is not using HTTP codes (handler always replies with\n # HTTP 200, providing error details if needed in protobuf serialized body).\n # So any other status code here means there was a transport level error.\n if result.status_code != 200:\n raise ProtocolError(\n replication_pb2.ServiceLinkResponse.TRANSPORT_ERROR,\n 'Request to the primary failed with HTTP %d.' % result.status_code)\n\n link_response = replication_pb2.ServiceLinkResponse.FromString(result.content)\n if link_response.status != replication_pb2.ServiceLinkResponse.SUCCESS:\n message = LINKING_ERRORS.get(\n link_response.status,\n 'Request to the primary failed with status %d.' % link_response.status)\n raise ProtocolError(link_response.status, message)\n\n # Become replica. Auth DB will be overwritten on a first push from Primary.\n state = model.AuthReplicationState(\n key=model.replication_state_key(),\n primary_id=ticket.primary_id,\n primary_url=ticket.primary_url)\n state.put()", "def mmo_replica_state(self, mmo_connection):\n\n # https://docs.mongodb.org/manual/reference/replica-states/\n replica_states = [\n { \"id\": 0, \"name\": \"STARTUP\", \"description\": \"Not yet an active member of any set. All members start up in this state. The mongod parses the replica set configuration document while in STARTUP.\" },\n { \"id\": 1, \"name\": \"PRIMARY\", \"description\": \"The member in state primary is the only member that can accept write operations.\" },\n { \"id\": 2, \"name\": \"SECONDARY\", \"description\": \"A member in state secondary is replicating the data store. Data is available for reads, although they may be stale.\" },\n { \"id\": 3, \"name\": \"RECOVERING\", \"description\": \"Can vote. 
Members either perform startup self-checks, or transition from completing a rollback or resync.\" },\n { \"id\": 5, \"name\": \"STARTUP2\", \"description\": \"The member has joined the set and is running an initial sync.\" },\n { \"id\": 6, \"name\": \"UNKNOWN\", \"description\": \"The member's state, as seen from another member of the set, is not yet known.\" },\n { \"id\": 7, \"name\": \"ARBITER\", \"description\": \"Arbiters do not replicate data and exist solely to participate in elections.\" },\n { \"id\": 8, \"name\": \"DOWN\", \"description\": \"The member, as seen from another member of the set, is unreachable.\" },\n { \"id\": 9, \"name\": \"ROLLBACK\", \"description\": \"This member is actively performing a rollback. Data is not available for reads.\" },\n { \"id\": 10, \"name\": \"REMOVED\", \"description\": \"This member was once in a replica set but was subsequently removed.\" }\n ]\n\n if self.mmo_is_mongod(mmo_connection):\n return replica_states[mmo_connection[\"admin\"].command(\"replSetGetStatus\")[\"myState\"]]\n else:\n raise Exception(\"Not a mongod process\")", "def test_read_cluster_resource_quota(self):\n pass", "def replicas(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"replicas\")", "def sstable_repairedset_test(self):\n cluster = self.cluster\n cluster.set_configuration_options(values={'hinted_handoff_enabled': False})\n cluster.populate(2).start()\n node1, node2 = cluster.nodelist()\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=2)', 'compaction(strategy=SizeTieredCompactionStrategy,enabled=false)', '-rate', 'threads=50'])\n\n node1.flush()\n node2.flush()\n\n node2.stop(gently=False)\n\n node2.run_sstablerepairedset(keyspace='keyspace1')\n node2.start(wait_for_binary_proto=True)\n\n initialOut1 = node1.run_sstablemetadata(keyspace='keyspace1').stdout\n initialOut2 = node2.run_sstablemetadata(keyspace='keyspace1').stdout\n\n matches = findall('(?<=Repaired at:).*', '\\n'.join([initialOut1, initialOut2]))\n debug(\"Repair timestamps are: {}\".format(matches))\n\n uniquematches = set(matches)\n matchcount = Counter(matches)\n\n self.assertGreaterEqual(len(uniquematches), 2, uniquematches)\n\n self.assertGreaterEqual(max(matchcount), 1, matchcount)\n\n self.assertIn('Repaired at: 0', '\\n'.join([initialOut1, initialOut2]))\n\n node1.stop()\n node2.stress(['write', 'n=15K', 'no-warmup', '-schema', 'replication(factor=2)'])\n node2.flush()\n node1.start(wait_for_binary_proto=True)\n\n if cluster.version() >= \"2.2\":\n node1.repair()\n else:\n node1.nodetool(\"repair -par -inc\")\n\n finalOut1 = node1.run_sstablemetadata(keyspace='keyspace1').stdout\n finalOut2 = node2.run_sstablemetadata(keyspace='keyspace1').stdout\n\n matches = findall('(?<=Repaired at:).*', '\\n'.join([finalOut1, finalOut2]))\n\n debug(matches)\n\n uniquematches = set(matches)\n matchcount = Counter(matches)\n\n self.assertGreaterEqual(len(uniquematches), 2)\n\n self.assertGreaterEqual(max(matchcount), 2)\n\n self.assertNotIn('Repaired at: 0', '\\n'.join([finalOut1, finalOut2]))", "def start_wsrep_new_cluster(self):\r\n return execute(self._start_wsrep_new_cluster)", "def test_snat_with_docker_restart_on_master(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"docker\",\n host_ips = 
[self.inputs.k8s_master_ip])\n time.sleep(30)\n cluster_status, error_nodes = ContrailStatusChecker(self.inputs).wait_till_contrail_cluster_stable()\n assert cluster_status, 'All nodes and services not up. Failure nodes are: %s' % (\n error_nodes)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def resource_type(self):\n return 'cluster'", "def test_update_nas_share_by_pool(self):\n pass", "def auto_config(self, num_replicas=1):\n _ = num_replicas\n return {}", "def failover_host(self, context, volumes, secondary_id=None, groups=None):\n volume_updates = []\n back_end_ip = None\n svc_host = volume_utils.extract_host(self.host, 'backend')\n service = objects.Service.get_by_args(context, svc_host,\n 'cinder-volume')\n\n if secondary_id and secondary_id != self.replica.backend_id:\n LOG.error(\"Kaminario driver received failover_host \"\n \"request, But backend is non replicated device\")\n raise exception.UnableToFailOver(reason=_(\"Failover requested \"\n \"on non replicated \"\n \"backend.\"))\n\n if (service.active_backend_id and\n service.active_backend_id != self.configuration.san_ip):\n self.snap_updates = []\n rep_volumes = []\n # update status for non-replicated primary volumes\n for v in volumes:\n vol_name = self.get_volume_name(v['id'])\n vol = self.client.search(\"volumes\", name=vol_name)\n if v.replication_status != K2_REP_FAILED_OVER and vol.total:\n status = 'available'\n if v.volume_attachment:\n map_rs = self.client.search(\"mappings\",\n volume=vol.hits[0])\n status = 'in-use'\n if map_rs.total:\n map_rs.hits[0].delete()\n volume_updates.append({'volume_id': v['id'],\n 'updates':\n {'status': status}})\n else:\n rep_volumes.append(v)\n\n # In-sync from secondaray array to primary array\n for v in rep_volumes:\n vol_name = self.get_volume_name(v['id'])\n vol = self.client.search(\"volumes\", name=vol_name)\n rvol_name = self.get_rep_name(vol_name)\n rvol = self.target.search(\"volumes\", name=rvol_name)\n session_name = self.get_session_name(v['id'])\n rsession_name = self.get_rep_name(session_name)\n ssn = self.target.search(\"replication/sessions\",\n name=rsession_name)\n if ssn.total:\n tgt_ssn = ssn.hits[0]\n ssn = self.client.search(\"replication/sessions\",\n name=session_name)\n if ssn.total:\n src_ssn = ssn.hits[0]\n\n if (tgt_ssn.state == 'failed_over' and\n tgt_ssn.current_role == 'target' and vol.total and src_ssn):\n map_rs = self.client.search(\"mappings\", volume=vol.hits[0])\n if map_rs.total:\n map_rs.hits[0].delete()\n tgt_ssn.state = 'in_sync'\n tgt_ssn.save()\n self._check_for_status(src_ssn, 'in_sync')\n if (rvol.total and src_ssn.state == 'in_sync' and\n src_ssn.current_role == 'target'):\n gen_no = self._create_volume_replica_user_snap(self.target,\n tgt_ssn)\n self.snap_updates.append({'tgt_ssn': tgt_ssn,\n 'gno': gen_no,\n 'stime': time.time()})\n LOG.debug(\"The target session: %s state is \"\n \"changed to in sync\", rsession_name)\n\n self._is_user_snap_sync_finished()\n\n # Delete secondary volume mappings and create snapshot\n for v in rep_volumes:\n vol_name = self.get_volume_name(v['id'])\n vol = self.client.search(\"volumes\", name=vol_name)\n rvol_name = self.get_rep_name(vol_name)\n rvol = self.target.search(\"volumes\", name=rvol_name)\n session_name = self.get_session_name(v['id'])\n rsession_name = self.get_rep_name(session_name)\n ssn = self.target.search(\"replication/sessions\",\n name=rsession_name)\n if ssn.total:\n tgt_ssn = ssn.hits[0]\n ssn = 
self.client.search(\"replication/sessions\",\n name=session_name)\n if ssn.total:\n src_ssn = ssn.hits[0]\n if (rvol.total and src_ssn.state == 'in_sync' and\n src_ssn.current_role == 'target'):\n map_rs = self.target.search(\"mappings\",\n volume=rvol.hits[0])\n if map_rs.total:\n map_rs.hits[0].delete()\n gen_no = self._create_volume_replica_user_snap(self.target,\n tgt_ssn)\n self.snap_updates.append({'tgt_ssn': tgt_ssn,\n 'gno': gen_no,\n 'stime': time.time()})\n self._is_user_snap_sync_finished()\n # changing source sessions to failed-over\n for v in rep_volumes:\n vol_name = self.get_volume_name(v['id'])\n vol = self.client.search(\"volumes\", name=vol_name)\n rvol_name = self.get_rep_name(vol_name)\n rvol = self.target.search(\"volumes\", name=rvol_name)\n session_name = self.get_session_name(v['id'])\n rsession_name = self.get_rep_name(session_name)\n ssn = self.target.search(\"replication/sessions\",\n name=rsession_name)\n if ssn.total:\n tgt_ssn = ssn.hits[0]\n ssn = self.client.search(\"replication/sessions\",\n name=session_name)\n if ssn.total:\n src_ssn = ssn.hits[0]\n if (rvol.total and src_ssn.state == 'in_sync' and\n src_ssn.current_role == 'target'):\n src_ssn.state = 'failed_over'\n src_ssn.save()\n self._check_for_status(tgt_ssn, 'suspended')\n LOG.debug(\"The target session: %s state is \"\n \"changed to failed over\", session_name)\n\n src_ssn.state = 'in_sync'\n src_ssn.save()\n LOG.debug(\"The target session: %s state is \"\n \"changed to in sync\", session_name)\n rep_status = fields.ReplicationStatus.DISABLED\n volume_updates.append({'volume_id': v['id'],\n 'updates':\n {'replication_status': rep_status}})\n\n back_end_ip = self.configuration.san_ip\n else:\n \"\"\"Failover to replication target.\"\"\"\n for v in volumes:\n vol_name = self.get_volume_name(v['id'])\n rv = self.get_rep_name(vol_name)\n if self.target.search(\"volumes\", name=rv).total:\n self._failover_volume(v)\n volume_updates.append(\n {'volume_id': v['id'],\n 'updates':\n {'replication_status': K2_REP_FAILED_OVER}})\n else:\n volume_updates.append({'volume_id': v['id'],\n 'updates': {'status': 'error', }})\n back_end_ip = self.replica.backend_id\n return back_end_ip, volume_updates, []", "def cluster_assign(images_lists, dataset):\n assert images_lists is not None\n pseudolabels = []\n image_indexes = []\n for cluster, images in enumerate(images_lists):\n image_indexes.extend(images)\n pseudolabels.extend([cluster] * len(images))\n print(image_indexes)\n print(pseudolabels)\n \n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n t = transforms.Compose([transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize])\n\n return ReassignedDataset(image_indexes, pseudolabels, dataset, t)", "def test_read_namespaced_applied_cluster_resource_quota(self):\n pass", "def rds_resource(session):\n #print type(session)\n rds = session.resource('rds')", "def test_snat_with_kubelet_restart_on_master(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"kubelet\",\n host_ips = [self.inputs.k8s_master_ip])\n time.sleep(30) # Wait timer for all kubernetes pods to stablise.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", 
"def _setup_cluster(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "def _check_all_replicas_connected(num_replicas, gateway_port, protocol):\n exec_ids = set()\n exec_id_list = []\n for i in range(num_replicas + 1):\n id_ = _send_request(gateway_port, protocol, request_size=2)[0].text\n exec_ids.add(id_)\n exec_id_list.append(id_)\n print(exec_id_list)\n assert len(exec_ids) == num_replicas", "def test_patch_hyperflex_cluster_storage_policy(self):\n pass", "def multi_dc_replace_with_rf1_test(self):\n cluster = self.cluster\n cluster.populate([1, 1])\n cluster.start()\n node1, node2 = cluster.nodelist()\n\n node1 = cluster.nodes['node1']\n yaml_config = \"\"\"\n # Create the keyspace and table\n keyspace: keyspace1\n keyspace_definition: |\n CREATE KEYSPACE keyspace1 WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 1, 'dc2': 1};\n table: users\n table_definition:\n CREATE TABLE users (\n username text,\n first_name text,\n last_name text,\n email text,\n PRIMARY KEY(username)\n ) WITH compaction = {'class':'SizeTieredCompactionStrategy'};\n insert:\n partitions: fixed(1)\n batchtype: UNLOGGED\n queries:\n read:\n cql: select * from users where username = ?\n fields: samerow\n \"\"\"\n with tempfile.NamedTemporaryFile(mode='w+') as stress_config:\n stress_config.write(yaml_config)\n stress_config.flush()\n node1.stress(['user', 'profile=' + stress_config.name, 'n=10k', 'no-warmup',\n 'ops(insert=1)', '-rate', 'threads=50'])\n\n session = self.patient_cql_connection(node1)\n\n # change system_auth keyspace to 2 (default is 1) to avoid\n # \"Unable to find sufficient sources for streaming\" warning\n if cluster.cassandra_version() >= '2.2.0':\n session.execute(\"\"\"\n ALTER KEYSPACE system_auth\n WITH replication = {'class':'SimpleStrategy', 'replication_factor':2};\n \"\"\")\n\n # Save initial data\n stress_table = 'keyspace1.users'\n query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.TWO)\n initial_data = rows_to_list(session.execute(query))\n\n # stop node to replace\n debug(\"Stopping node to replace.\")\n node2.stop(wait_other_notice=True)\n\n node3 = new_node(cluster, data_center='dc2')\n node3.start(replace_address='127.0.0.2', wait_for_binary_proto=True)\n\n assert_bootstrap_state(self, node3, 'COMPLETED')\n\n # Check that keyspace was replicated from dc1 to dc2\n self.assertFalse(node3.grep_log(\"Unable to find sufficient sources for streaming range\"))\n\n # query should work again with node1 stopped\n node1.stop(wait_other_notice=True)\n debug(\"Verifying data on new node.\")\n session = self.patient_exclusive_cql_connection(node3)\n assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table),\n expected=initial_data,\n cl=ConsistencyLevel.LOCAL_ONE)" ]
[ "0.61841893", "0.5753545", "0.57126534", "0.56451964", "0.56280035", "0.5557337", "0.5455816", "0.5453968", "0.5383622", "0.5370417", "0.5277755", "0.5269768", "0.51675713", "0.51113904", "0.5104218", "0.51019776", "0.5083449", "0.5081104", "0.50798476", "0.50763375", "0.50650895", "0.5054479", "0.5046147", "0.5012916", "0.49867472", "0.49848706", "0.49813607", "0.4975331", "0.49648827", "0.49580145", "0.49431318", "0.49341604", "0.49323288", "0.492733", "0.49080053", "0.49040398", "0.4898598", "0.48825565", "0.48725727", "0.4864399", "0.48625237", "0.48619008", "0.48316303", "0.48303512", "0.4815742", "0.4815742", "0.4815742", "0.4815742", "0.4815742", "0.48094958", "0.48055106", "0.48008057", "0.47915643", "0.47833884", "0.47754735", "0.47719592", "0.47675052", "0.4761328", "0.47607028", "0.47570586", "0.4753748", "0.47514245", "0.47469094", "0.47455242", "0.47273862", "0.47236508", "0.47188374", "0.47125617", "0.4700134", "0.4700134", "0.46969256", "0.46854067", "0.46846798", "0.46824265", "0.4674407", "0.46713883", "0.46687746", "0.46655247", "0.46639124", "0.46531653", "0.46447238", "0.4644721", "0.46325418", "0.4629063", "0.46270484", "0.4625269", "0.46245843", "0.46218812", "0.46179214", "0.46151114", "0.4612007", "0.46050638", "0.459951", "0.45976135", "0.45967758", "0.4592274", "0.45866662", "0.4582709", "0.45756033", "0.45737502", "0.4573537" ]
0.0
-1
This operation is applicable only to replica set instances and sharded cluster instances.
def modify_dbinstance_network_type(
    self,
    request: dds_20151201_models.ModifyDBInstanceNetworkTypeRequest,
) -> dds_20151201_models.ModifyDBInstanceNetworkTypeResponse:
    runtime = util_models.RuntimeOptions()
    return self.modify_dbinstance_network_type_with_options(request, runtime)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def replicaof(self, *args, **kwargs) -> NoReturn:\n raise RedisClusterException(\"REPLICAOF is not supported in cluster mode\")", "def modifyMastersWithMultipleInstances(self):\n # Nothing to do\n pass", "def cluster(self):\n assert False", "def replica_set_name(self):\n ...", "def test_redis_increase_replica_count_usual_case():", "def test_patch_cluster_role(self):\n pass", "def mmo_execute_on_secondary_or_primary(self, mmo_connection, command, replicaset=\"all\", first_available_only=False): # TODO add execution database?\n cluster_command_output = []\n replsets_completed = set()\n all_replsets = set()\n repl_hosts = self.mmo_shard_servers(mmo_connection)\n for doc in repl_hosts:\n hostname, port, shard = doc[\"hostname\"], doc[\"port\"], doc[\"shard\"]\n all_replsets.add(shard)\n auth_dic = self.mmo_get_auth_details_from_connection(mmo_connection)\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"],\n auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"SECONDARY\" \\\n and (replicaset == \"all\" or replicaset == shard) \\\n and shard not in replsets_completed:\n secondary_found = True\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append(\n {\"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": command_output})\n #if first_available_only:\n replsets_completed.add(shard)\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": shard,\n \"command_output\": {\"Error\": \"This mongod process is not available\"}})\n else:\n raise excep\n # This is the case where there are no secondaries\n for missing_shard in (all_replsets^replsets_completed):\n for doc in repl_hosts:\n hostname, port, shard = doc[\"hostname\"], doc[\"port\"], doc[\"shard\"]\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"],\n auth_dic[\"authentication_database\"])\n if shard == missing_shard and self.mmo_replica_state(c)[\"name\"] == \"PRIMARY\":\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": command_output})\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": shard,\n \"command_output\": {\"Error\": \"This mongod process is not available\"}})\n else:\n raise excep\n return cluster_command_output", "def is_replica(self):\n if (\n self.replica\n and self.aip\n and not self.deleted\n and not self.sip\n and not self.dip\n ):\n return True\n return False", "def test_replace_cluster_role(self):\n pass", "def test_base_replica_repair_with_contention(self):\n self._base_replica_repair_test(fail_mv_lock=True)", "def mmo_execute_on_secondaries(self, mmo_connection, command, replicaset=\"all\", first_available_only=False): # TODO add execution database?\n cluster_command_output = []\n replsets_completed = []\n\n auth_dic = self.mmo_get_auth_details_from_connection(mmo_connection)\n\n if replicaset == self.config_server_repl_name: # Config server replset\n for doc in self.mmo_config_servers(mmo_connection):\n hostname, port = doc[\"hostname\"], doc[\"port\"]\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"],auth_dic[\"authentication_database\"])\n if 
self.mmo_replica_state(c)[\"name\"] == \"SECONDARY\":\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": command_output})\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": {\"Error\": \"mongod process is not up\"}})\n else:\n raise excep\n else:\n for doc in self.mmo_shard_servers(mmo_connection):\n hostname, port, shard = doc[\"hostname\"], doc[\"port\"], doc[\"shard\"]\n try:\n\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"], auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"SECONDARY\" \\\n and (replicaset == \"all\" or replicaset == shard)\\\n and shard not in replsets_completed:\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({ \"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": command_output })\n if first_available_only:\n replsets_completed.append(shard)\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": {\"Error\": \"This mongod process is not available\"}})\n else:\n raise excep\n return cluster_command_output", "def mmo_execute_on_primaries(self, mmo_connection, command, replicaset=\"all\"): # TODO add execution database?\n\n auth_dic = self.mmo_get_auth_details_from_connection(mmo_connection)\n\n cluster_command_output = []\n if replicaset == self.config_server_repl_name: # Config server replset\n for doc in self.mmo_config_servers(mmo_connection):\n hostname, port = doc[\"hostname\"], doc[\"port\"]\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"],auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"PRIMARY\":\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": command_output})\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": {\"Error\": \"mongod process is not up\"}})\n else:\n raise excep\n else:\n for doc in self.mmo_shard_servers(mmo_connection):\n hostname, port, shard = doc[\"hostname\"], doc[\"port\"], doc[\"shard\"]\n\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"], auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"PRIMARY\" and (replicaset == \"all\" or replicaset == shard):\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({ \"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": command_output })\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({ \"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": { \"Error\": \"mongod process is not up\" } })\n else:\n raise excep\n return cluster_command_output", "def prepare_replica_for_exchange(self, replica):\n pass", "def crash_safe_replication(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"crash_safe_replication\")", "def 
test_read_cluster_role(self):\n pass", "def test_replace_cluster_policy(self):\n pass", "def test_patch_cluster_policy(self):\n pass", "def dvs_instances_batch_mix_sg(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n self.show_step(2)\n net_1 = os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network is created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(3)\n self.show_step(4)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n self.icmp[\"security_group_rule\"][\"security_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"remote_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n self.icmp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n\n for sg in [sg1, sg2]:\n self.tcp[\"security_group_rule\"][\"security_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"remote_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n self.tcp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n\n # Add rules for ssh and ping\n os_conn.goodbye_security()\n _s_groups = os_conn.neutron.list_security_groups()['security_groups']\n _srv_tenant_id = os_conn.get_tenant(SERVTEST_TENANT).id\n default_sg = [sg for sg in _s_groups\n if sg['tenant_id'] == _srv_tenant_id and\n sg['name'] == 'default'][0]\n\n for step in (5, 9):\n self.show_step(step) # step 5, 9\n step += 1\n self.show_step(step) # step 6, 10\n step += 1\n\n openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=vm_count,\n security_groups=[sg1.name, sg2.name, default_sg['name']])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(step) # step 7, 11\n step += 1\n\n instances = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, instances)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair,\n timeout=60 * 5,\n interval=10)\n\n self.show_step(step) # step 8, 12\n step += 1\n\n for instance in instances:\n os_conn.nova.servers.delete(instance)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(instance)", "def upsert(self):\n\n if 
self.cluster:\n self.cluster.upsert()\n else:\n super().upsert()", "def delete_cluster(self):", "def use_read_replica_if_available(queryset):\r\n return queryset.using(\"read_replica\") if \"read_replica\" in settings.DATABASES else queryset", "def failover_replica(self) -> 'outputs.InstanceFailoverReplicaResponse':\n return pulumi.get(self, \"failover_replica\")", "def test_patch_hyperflex_cluster(self):\n pass", "def replica_catalog(train_tweets_path, val_tweets_path, test_tweets_path, dataset_images, EMBEDDING_BASE_PATH):\n rc = ReplicaCatalog()\n http_location = \"https://workflow.isi.edu/Panorama/Data/CrisisComputing\"\n\n # list of input file objects\n input_images = []\n\n # Adding Images to the replica catalogue\n for image_path in dataset_images:\n name = image_path[1].split(\"/\")[-1]\n image_file = File(name)\n input_images.append(image_file)\n \n path_split = image_path[0].split(\"/\")\n #rc.add_replica(\"local\", image_file, image_path[0])\n rc.add_replica(\"isi\", image_file, os.path.join(http_location, path_split[-2], path_split[-1]))\n\n \n glove_embeddings = File('glove.twitter.27B.200d.txt')\n \n # File objects for train, val and test tweets csv\n train_tweets_name = File(train_tweets_path.split('/')[-1])\n val_tweets_name = File(val_tweets_path.split('/')[-1])\n test_tweets_name = File(test_tweets_path.split('/')[-1])\n \n #rc.add_replica(\"local\", train_tweets_name, train_tweets_path)\n #rc.add_replica(\"local\", val_tweets_name, val_tweets_path)\n #rc.add_replica(\"local\", test_tweets_name, test_tweets_path)\n #rc.add_replica(\"local\", glove_embeddings, os.path.join(os.getcwd(), os.path.join(EMBEDDING_BASE_PATH, GLOVE_EMBEDDING_FILE))) \n \n path_split = train_tweets_path.split('/')\n rc.add_replica(\"isi\", train_tweets_name, os.path.join(http_location, path_split[-2], path_split[-1]))\n path_split = val_tweets_path.split('/')\n rc.add_replica(\"isi\", val_tweets_name, os.path.join(http_location, path_split[-2], path_split[-1]))\n path_split = test_tweets_path.split('/')\n rc.add_replica(\"isi\", test_tweets_name, os.path.join(http_location, path_split[-2], path_split[-1]))\n \n rc.add_replica(\"isi\", glove_embeddings, os.path.join(http_location, \"glove_twitter\", GLOVE_EMBEDDING_FILE)) \n rc.write()\n\n return input_images, train_tweets_name, val_tweets_name, test_tweets_name, glove_embeddings", "def test_replace_cluster_resource_quota(self):\n pass", "def replicas(self, replicas):\n\n self._replicas = replicas", "def index(self):\n\n if self.cluster:\n self.cluster.index()\n else:\n super().index()", "def cluster_replicate(\n self, target_nodes: \"TargetNodesT\", node_id: str\n ) -> ResponseT:\n return self.execute_command(\n \"CLUSTER REPLICATE\", node_id, target_nodes=target_nodes\n )", "def replica(self) -> str:\n return pulumi.get(self, \"replica\")", "def share_replica_delete(context, share_replica_id, session=None,\n need_to_update_usages=True):\n session = session or get_session()\n\n share_instance_delete(context, share_replica_id, session=session,\n need_to_update_usages=need_to_update_usages)", "def test_snat_with_master_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def 
test_update_hyperflex_cluster(self):\n pass", "def replication(self):\n return self._replication", "def test_replace_cluster_resource_quota_status(self):\n pass", "def test_read_cluster_policy(self):\n pass", "def _mix_replicas(self):\n logger.debug(\"Mixing replicas (does nothing for MultiStateSampler)...\")\n\n # Reset storage to keep track of swap attempts this iteration.\n self._n_accepted_matrix[:, :] = 0\n self._n_proposed_matrix[:, :] = 0\n\n # Determine fraction of swaps accepted this iteration.\n n_swaps_proposed = self._n_proposed_matrix.sum()\n n_swaps_accepted = self._n_accepted_matrix.sum()\n swap_fraction_accepted = 0.0\n if n_swaps_proposed > 0:\n swap_fraction_accepted = n_swaps_accepted / n_swaps_proposed # Python 3 uses true division for /\n logger.debug(\"Accepted {}/{} attempted swaps ({:.1f}%)\".format(n_swaps_accepted, n_swaps_proposed,\n swap_fraction_accepted * 100.0))\n return self._replica_thermodynamic_states", "def test_patch_cluster_resource_quota(self):\n pass", "def is_replicated():\n if tf.distribute.has_strategy() and tf.distribute.get_replica_context():\n return tf.distribute.get_strategy().num_replicas_in_sync > 1\n return get_tf_replicator() is not None or is_tpu_replicated()", "def share_replicas_get_all(context, with_share_data=False,\n with_share_server=True, session=None):\n session = session or get_session()\n\n result = _share_replica_get_with_filters(\n context, with_share_server=with_share_server, session=session).all()\n\n if with_share_data:\n result = _set_instances_share_data(context, result, session)\n\n return result", "def check_smartstack_replication_for_instance(\n instance_config,\n expected_count,\n smartstack_replication_checker,\n):\n\n crit_threshold = instance_config.get_replication_crit_percentage()\n\n log.info('Checking instance %s in smartstack', instance_config.job_id)\n smartstack_replication_info = \\\n smartstack_replication_checker.get_replication_for_instance(instance_config)\n\n log.debug('Got smartstack replication info for %s: %s' %\n (instance_config.job_id, smartstack_replication_info))\n\n if len(smartstack_replication_info) == 0:\n status = pysensu_yelp.Status.CRITICAL\n output = (\n 'Service %s has no Smartstack replication info. 
Make sure the discover key in your smartstack.yaml '\n 'is valid!\\n'\n ) % instance_config.job_id\n log.error(output)\n else:\n expected_count_per_location = int(expected_count / len(smartstack_replication_info))\n output = ''\n output_critical = ''\n output_ok = ''\n under_replication_per_location = []\n\n for location, available_backends in sorted(smartstack_replication_info.items()):\n num_available_in_location = available_backends.get(instance_config.job_id, 0)\n under_replicated, ratio = is_under_replicated(\n num_available_in_location, expected_count_per_location, crit_threshold,\n )\n if under_replicated:\n output_critical += '- Service %s has %d out of %d expected instances in %s (CRITICAL: %d%%)\\n' % (\n instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,\n )\n else:\n output_ok += '- Service %s has %d out of %d expected instances in %s (OK: %d%%)\\n' % (\n instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,\n )\n under_replication_per_location.append(under_replicated)\n\n output += output_critical\n if output_critical and output_ok:\n output += '\\n\\n'\n output += 'The following locations are OK:\\n'\n output += output_ok\n\n if any(under_replication_per_location):\n status = pysensu_yelp.Status.CRITICAL\n output += (\n \"\\n\\n\"\n \"What this alert means:\\n\"\n \"\\n\"\n \" This replication alert means that a SmartStack powered loadbalancer (haproxy)\\n\"\n \" doesn't have enough healthy backends. Not having enough healthy backends\\n\"\n \" means that clients of that service will get 503s (http) or connection refused\\n\"\n \" (tcp) when trying to connect to it.\\n\"\n \"\\n\"\n \"Reasons this might be happening:\\n\"\n \"\\n\"\n \" The service may simply not have enough copies or it could simply be\\n\"\n \" unhealthy in that location. There also may not be enough resources\\n\"\n \" in the cluster to support the requested instance count.\\n\"\n \"\\n\"\n \"Things you can do:\\n\"\n \"\\n\"\n \" * You can view the logs for the job with:\\n\"\n \" paasta logs -s %(service)s -i %(instance)s -c %(cluster)s\\n\"\n \"\\n\"\n \" * Fix the cause of the unhealthy service. 
Try running:\\n\"\n \"\\n\"\n \" paasta status -s %(service)s -i %(instance)s -c %(cluster)s -vv\\n\"\n \"\\n\"\n \" * Widen SmartStack discovery settings\\n\"\n \" * Increase the instance count\\n\"\n \"\\n\"\n ) % {\n 'service': instance_config.service,\n 'instance': instance_config.instance,\n 'cluster': instance_config.cluster,\n }\n log.error(output)\n else:\n status = pysensu_yelp.Status.OK\n log.info(output)\n send_event(instance_config=instance_config, status=status, output=output)", "def crash_safe_replication_enabled(self) -> bool:\n return pulumi.get(self, \"crash_safe_replication_enabled\")", "def test_create_cluster_role(self):\n pass", "def share_replica_update(context, share_replica_id, values,\n with_share_data=False, session=None):\n session = session or get_session()\n\n with session.begin():\n _ensure_availability_zone_exists(context, values, session,\n strict=False)\n updated_share_replica = _share_instance_update(\n context, share_replica_id, values, session=session)\n\n if with_share_data:\n updated_share_replica = _set_instances_share_data(\n context, updated_share_replica, session)[0]\n\n return updated_share_replica", "def test_index_nas_shares_by_pool(self):\n pass", "def replicas(self) -> int:\n return pulumi.get(self, \"replicas\")", "def replicas(self) -> int:\n return pulumi.get(self, \"replicas\")", "def replicas(self) -> int:\n return pulumi.get(self, \"replicas\")", "def replicas(self) -> int:\n return pulumi.get(self, \"replicas\")", "def replicas(self) -> int:\n return pulumi.get(self, \"replicas\")", "def test_patch_cluster_resource_quota_status(self):\n pass", "def mmo_replicaset_conf(self, mmo_connection):\n command = {\"replSetGetConfig\" : 1}\n return self.mmo_execute_on_primaries(mmo_connection, command)", "def run(self, image, replicas, scale_replicas, command=None,\n status_wait=True):\n namespace = self.choose_namespace()\n\n name = self.client.create_statefulset(\n namespace=namespace,\n replicas=replicas,\n image=image,\n command=command,\n status_wait=status_wait\n )\n\n self.client.scale_statefulset(\n name,\n namespace=namespace,\n replicas=scale_replicas,\n status_wait=status_wait\n )\n\n self.client.scale_statefulset(\n name,\n namespace=namespace,\n replicas=replicas,\n status_wait=status_wait\n )\n\n self.client.delete_statefulset(\n name=name,\n namespace=namespace,\n status_wait=status_wait\n )", "def test_list_cluster_role(self):\n pass", "def failover_replica(self) -> pulumi.Output['outputs.InstanceFailoverReplicaResponse']:\n return pulumi.get(self, \"failover_replica\")", "def _base_replica_repair_test(self, fail_mv_lock=False):\n\n self.prepare(rf=3)\n node1, node2, node3 = self.cluster.nodelist()\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Write initial data')\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i))\n\n self._replay_batchlogs()\n\n logger.debug('Verify the data in the MV with CL=ALL')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0],\n cl=ConsistencyLevel.ALL\n )\n\n logger.debug('Shutdown node1')\n node1.stop(wait_other_notice=True)\n 
logger.debug('Delete node1 data')\n node1.clear(clear_all=True)\n\n jvm_args = []\n if fail_mv_lock:\n if self.cluster.version() >= LooseVersion('3.10'): # CASSANDRA-10134\n jvm_args = ['-Dcassandra.allow_unsafe_replace=true', '-Dcassandra.replace_address={}'.format(node1.address())]\n jvm_args.append(\"-Dcassandra.test.fail_mv_locks_count=1000\")\n # this should not make Keyspace.apply throw WTE on failure to acquire lock\n node1.set_configuration_options(values={'write_request_timeout_in_ms': 100})\n logger.debug('Restarting node1 with jvm_args={}'.format(jvm_args))\n node1.start(wait_for_binary_proto=True, jvm_args=jvm_args)\n logger.debug('Shutdown node2 and node3')\n node2.stop(wait_other_notice=True)\n node3.stop(wait_other_notice=True)\n\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n\n logger.debug('Verify that there is no data on node1')\n for i in range(1000):\n assert_none(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i)\n )\n\n logger.debug('Restarting node2 and node3')\n node2.start(wait_for_binary_proto=True)\n node3.start(wait_for_binary_proto=True)\n\n # Just repair the base replica\n logger.debug('Starting repair on node1')\n node1.nodetool(\"repair ks t\")\n\n logger.debug('Verify data with cl=ALL')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )", "def get_replica_ips(self):\n return self.membership", "def initialize_replicas(self):\n try:\n self.replicas+1\n except:\n print \"Ensemble MD Toolkit Error: Number of replicas must be \\\n defined for pattern ReplicaExchange!\"\n raise\n\n\n replicas = []\n N = self.replicas\n for k in range(N):\n r = ReplicaP(k)\n replicas.append(r)\n\n return replicas", "def run(self):\n client = self._get_client()\n metadata = self._metadata\n\n if str(metadata.get('cluster')) == str(self._cluster_id):\n if str(self._uuid) == str(self._node_id):\n #hypervisor_hostname = client.servers.find(\n # id=self._uuid).to_dict()['OS-EXT-SRV-ATTR:hypervisor_hostname']\n hypervisor_hostname = client.servers.find(\n id=self._uuid).to_dict()['OS-EXT-SRV-ATTR:host']\n data = {\n 'migrate': True,\n 'uuid': self._uuid,\n 'hypervisor_hostname': hypervisor_hostname,\n 'flavor_id': self._flavor\n }\n return Result(data)\n\n data = {\n 'migrate': False,\n 'uuid': self._uuid,\n 'hypervisor_hostname': '',\n 'flavor_id':self._flavor\n }\n return Result(data)", "def replace_with_insufficient_replicas_test(self):\n debug(\"Starting cluster with 3 nodes.\")\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n if DISABLE_VNODES:\n num_tokens = 1\n else:\n # a little hacky but grep_log returns the whole line...\n num_tokens = int(node3.get_conf_option('num_tokens'))\n\n debug(\"testing with num_tokens: {}\".format(num_tokens))\n\n debug(\"Inserting Data...\")\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=2)'])\n\n # stop node to replace\n debug(\"Stopping node to replace.\")\n node3.stop(wait_other_notice=True)\n\n # stop other replica\n debug(\"Stopping node2 (other replica)\")\n node2.stop(wait_other_notice=True)\n\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n cluster.add(node4, False)\n 
node4.start(replace_address='127.0.0.3', wait_for_binary_proto=False, wait_other_notice=False)\n\n # replace should fail due to insufficient replicas\n node4.watch_log_for(\"Unable to find sufficient sources for streaming range\")\n assert_not_running(node4)", "def dvs_instances_one_group(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(2)\n self.show_step(3)\n openstack.create_instances(os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n vm_count=vm_count,\n security_groups=[security_group.name])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(4)\n srv_list = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, srv_list)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair)\n\n self.show_step(5)\n for srv in srv_list:\n os_conn.nova.servers.delete(srv)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(srv)", "def scale_app(self, name, replicas):\n raise NotImplementedError", "def check_replica_primary(con,host, warning, critical,perf_data):\n if warning is None and critical is None:\n warning=1\n warning=warning or 2\n critical=critical or 2\n\n primary_status=0\n message=\"Primary server has not changed\"\n db=con[\"nagios\"]\n data=get_server_status(con)\n current_primary=data['repl'].get('primary')\n saved_primary=get_stored_primary_server_name(db)\n if current_primary is None:\n current_primary = \"None\"\n if saved_primary is None:\n saved_primary = \"None\"\n if current_primary != saved_primary:\n last_primary_server_record = {\"server\": current_primary}\n db.last_primary_server.update({\"_id\": \"last_primary\"}, {\"$set\" : last_primary_server_record} , upsert=True, safe=True)\n message = \"Primary server has changed from %s to %s\" % (saved_primary, current_primary)\n primary_status=1\n return check_levels(primary_status,warning,critical,message)", "def test_delete_collection_cluster_policy(self):\n pass", "def _load_cluster(self):", "def replica_configuration(self) -> 'outputs.ReplicaConfigurationResponse':\n return pulumi.get(self, \"replica_configuration\")", "def test_delete_cluster_role(self):\n pass", "def _standby_clone():\n # manualy:\n # $ mkdir -p /var/lib/postgresql/9.1/testscluster/\n # $ rsync -avz --rsh='ssh -p2222' root@12.34.56.789:/var/lib/postgresql/9.1/testscluster/ /var/lib/postgresql/9.1/testscluster/\n\n with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):\n puts(green('Start cloning the master'))\n repmgr_clone_command = 'repmgr -D %(slave_pgdata_path)s -d %(sync_db)s -p %(cluster_port)s -U %(sync_user)s -R postgres --verbose standby clone %(pgmaster_ip)s' % env\n puts(green(repmgr_clone_command))\n puts(\"-\" * 40)\n res = sudo(repmgr_clone_command, user='postgres')\n if 'Can not connect to the 
remote host' in res or 'Connection to database failed' in res:\n puts(\"-\" * 40)\n puts(green(repmgr_clone_command))\n puts(\"-\" * 40)\n puts(\"Master server is %s reachable.\" % red(\"NOT\"))\n puts(\"%s you can try to CLONE the slave manually [%s]:\" % (green(\"BUT\"), red(\"at your own risk\")))\n puts(\"On the slave server:\")\n puts(\"$ sudo -u postgres rsync -avz --rsh='ssh -p%(master_ssh_port)s' postgres@%(pgmaster_ip)s:%(master_pgdata_path)s %(slave_pgdata_path)s --exclude=pg_xlog* --exclude=pg_control --exclude=*.pid\" % env)\n puts(\"Here:\")\n puts(\"$ fab <cluster_task_name> finish_configuring_slave\")\n abort(\"STOP...\")", "def num_replicas_per_shard(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"num_replicas_per_shard\")", "def num_replicas_per_shard(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"num_replicas_per_shard\")", "def num_replicas_per_shard(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"num_replicas_per_shard\")", "def main():\n rs = redshift(config_file=CONFIG_FILENAME)\n \n # check if cluster already available\n try:\n clust_avail = check_available(rs)\n except rs_client.exceptions.ClusterNotFoundFault:\n clust_avail = False\n\n # if cluster not available, create it\n if not clust_avail:\n create_cluster(rs) \n \n print(f'Cluster is available. Cluster information: \\n{rs.get_cluster_info()}')", "def test_modify_storage_group_srdf_set_consistency_enable(self):\n if not self.run_consistency_enable_check():\n self.skipTest(\n 'Skip test_modify_storage_group_srdf_set_consistency_enable '\n 'This fix is in V9.2.1.7')\n sg_name, srdf_group_number, local_volume, remote_volume = (\n self.create_rdf_sg())\n self.replication.modify_storage_group_srdf(\n storage_group_id=sg_name, action='setmode',\n srdf_group_number=srdf_group_number,\n options={'setMode': {'mode': 'Asynchronous'}})\n status = self.replication.modify_storage_group_srdf(\n storage_group_id=sg_name, srdf_group_number=srdf_group_number,\n action=\"EnableConsistency\")\n self.assertEqual('Enabled', status.get('consistency_protection'))\n disable_status = self.replication.modify_storage_group_srdf(\n storage_group_id=sg_name, srdf_group_number=srdf_group_number,\n action=\"DisableConsistency\")\n self.assertEqual(\n 'Disabled', disable_status.get('consistency_protection'))", "def modify_rds_security_group(payload):\n # version = payload.get(\"version\")\n rds_ids = payload.pop(\"resource_id\")\n sg_ids = payload.pop(\"sg_id\")\n apply_action = \"GrantSecurityGroup\"\n remove_action = \"RemoveSecurityGroup\"\n check_instance_security_action = \"DescribeSecurityGroupByInstance\"\n version = payload.get(\"version\")\n result_data = {}\n\n succ, resp = get_ha_rds_backend_instance_info(payload)\n if not succ:\n return resp\n rds_2_instance = {rds: instance for instance, rds in resp.items()}\n\n if len(sg_ids) > 1:\n return console_response(\n SecurityErrorCode.ONE_SECURITY_PER_INSTANCE_ERROR, \"modify failed\")\n sg_id = sg_ids[0]\n\n code = 0\n msg = 'Success'\n for rds_id in rds_ids:\n sg_results_succ = []\n sg = RdsSecurityGroupModel.get_security_by_id(sg_id=sg_id)\n sg_uuid = sg.uuid\n visible_rds_record = RdsModel.get_rds_by_id(rds_id=rds_id)\n if visible_rds_record.rds_type == 'ha':\n rds_group = visible_rds_record.rds_group\n rds_records = RdsModel.get_rds_records_by_group(rds_group)\n else:\n rds_records = []\n for rds_record in rds_records:\n rds_ins_uuid = rds_2_instance.get(rds_record.uuid)\n\n payload.update(\n {\"action\": 
check_instance_security_action, \"version\": version,\n \"server\": rds_ins_uuid})\n # check_resp = api.get(payload=payload, timeout=10)\n check_resp = api.get(payload=payload)\n if check_resp.get(\"code\") != 0:\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = check_resp.get(\"msg\")\n continue\n\n # if the instance already has a security group, remove it\n if check_resp[\"data\"][\"total_count\"] > 0:\n old_sg_uuid = check_resp[\"data\"][\"ret_set\"][0][\"id\"]\n payload.update({\"action\": remove_action, \"version\": version,\n \"server\": rds_ins_uuid,\n \"security_group\": old_sg_uuid})\n # remove_resp = api.get(payload=payload, timeout=10)\n remove_resp = api.get(payload=payload)\n if remove_resp.get(\"code\") != 0:\n logger.debug(\"the resp of removing the old securty group is:\" +\n str(remove_resp))\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = remove_resp.get(\"msg\")\n continue\n else:\n rds_record.sg = None\n rds_record.save()\n\n # grant the new security group to the instance\n payload.update({\"action\": apply_action, \"version\": version,\n \"server\": rds_ins_uuid, \"security_group\": sg_uuid})\n # grant_resp = api.get(payload=payload, timeout=10)\n grant_resp = api.get(payload=payload)\n\n if grant_resp.get(\"code\") != 0:\n logger.debug(\"the resp of granting the new securty group is:\" +\n str(grant_resp))\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = grant_resp.get(\"msg\")\n logger.error(\n \"security_group with sg_id \" + sg_id +\n \" cannot apply to rds with rds_id \" + rds_id)\n else:\n try:\n rds_record.sg = RdsSecurityGroupModel.\\\n get_security_by_id(sg_id)\n rds_record.save()\n except Exception as exp:\n logger.error(\"cannot save grant sg to rds to db, {}\".\n format(exp.message))\n else:\n sg_results_succ.append(sg_id)\n result_data.update({rds_id: sg_results_succ})\n resp = console_response(code, msg, len(result_data.keys()), [result_data])\n return resp", "def test_rollback(self):\n os.system('rm config.txt; touch config.txt')\n test_oplog, primary_conn, mongos, solr = self.get_new_oplog()\n\n if not start_cluster():\n self.fail('Cluster could not be started successfully!')\n\n solr = DocManager()\n test_oplog.doc_manager = solr\n solr._delete() # equivalent to solr.delete(q='*: *')\n\n mongos['test']['test'].remove({})\n mongos['test']['test'].insert( \n {'_id': ObjectId('4ff74db3f646462b38000001'),\n 'name': 'paulie'},\n safe=True\n )\n while (mongos['test']['test'].find().count() != 1):\n time.sleep(1)\n cutoff_ts = test_oplog.get_last_oplog_timestamp()\n\n first_doc = {'name': 'paulie', '_ts': bson_ts_to_long(cutoff_ts),\n 'ns': 'test.test',\n '_id': ObjectId('4ff74db3f646462b38000001')}\n\n #try kill one, try restarting\n kill_mongo_proc(primary_conn.host, PORTS_ONE['PRIMARY'])\n\n new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))\n admin = new_primary_conn['admin']\n while admin.command(\"isMaster\")['ismaster'] is False:\n time.sleep(1)\n time.sleep(5)\n count = 0\n while True:\n try:\n mongos['test']['test'].insert({\n '_id': ObjectId('4ff74db3f646462b38000002'),\n 'name': 'paul'}, \n safe=True)\n break\n except OperationFailure:\n count += 1\n if count > 60:\n self.fail('Call to insert doc failed too many times')\n time.sleep(1)\n continue\n while (mongos['test']['test'].find().count() != 2):\n time.sleep(1)\n kill_mongo_proc(primary_conn.host, PORTS_ONE['SECONDARY'])\n start_mongo_proc(PORTS_ONE['PRIMARY'], \"demo-repl\", \"/replset1a\",\n \"/replset1a.log\", None)\n\n #wait for master to be established\n while 
primary_conn['admin'].command(\"isMaster\")['ismaster'] is False:\n time.sleep(1)\n\n start_mongo_proc(PORTS_ONE['SECONDARY'], \"demo-repl\", \"/replset1b\",\n \"/replset1b.log\", None)\n\n #wait for secondary to be established\n admin = new_primary_conn['admin']\n while admin.command(\"replSetGetStatus\")['myState'] != 2:\n time.sleep(1)\n while retry_until_ok(mongos['test']['test'].find().count) != 1:\n time.sleep(1)\n\n self.assertEqual(str(new_primary_conn.port), PORTS_ONE['SECONDARY'])\n self.assertEqual(str(primary_conn.port), PORTS_ONE['PRIMARY'])\n\n last_ts = test_oplog.get_last_oplog_timestamp()\n second_doc = {'name': 'paul', '_ts': bson_ts_to_long(last_ts),\n 'ns': 'test.test', \n '_id': ObjectId('4ff74db3f646462b38000002')}\n\n test_oplog.doc_manager.upsert(first_doc)\n test_oplog.doc_manager.upsert(second_doc)\n\n test_oplog.rollback()\n test_oplog.doc_manager.commit()\n results = solr._search()\n\n assert(len(results) == 1)\n\n self.assertEqual(results[0]['name'], 'paulie')\n self.assertTrue(results[0]['_ts'] <= bson_ts_to_long(cutoff_ts))\n\n #test_oplog.join()", "def delete_replicas(self, target_count):\n while len(self.replicas) > target_count:\n self.remove_replica(self.replicas[-1])", "def _report_replica_set_state(self, state, clean_server_name, replset_name, agentConfig):\n last_state = self._last_state_by_server.get(clean_server_name, -1)\n self._last_state_by_server[clean_server_name] = state\n if last_state != state and last_state != -1:\n return self.create_event(last_state, state, clean_server_name, replset_name, agentConfig)", "def test_list_cluster_policy(self):\n pass", "def enable_replicate(self, req, id, body):\n LOG.info(_LI(\"Enable volume's replicate, volume_id: %s\"), id)\n context = req.environ['sgservice.context']\n volume = self.service_api.get(context, id)\n replicate_info = self.service_api.enable_replicate(context, volume)\n return self._view_builder.action_summary(req, replicate_info)", "def cluster_replicas(\n self, node_id: str, target_nodes: Optional[\"TargetNodesT\"] = None\n ) -> ResponseT:\n return self.execute_command(\n \"CLUSTER REPLICAS\", node_id, target_nodes=target_nodes\n )", "def test_snat_with_nodes_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n for node in self.inputs.k8s_slave_ips:\n self.inputs.reboot(node)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def replica_names(self) -> Sequence[str]:\n return pulumi.get(self, \"replica_names\")", "def test_cbrestoremgr_should_not_change_replica_count_in_restore_bucket(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=10000)\n if not self.new_replicas:\n self.fail(\"This test needs to pass param 'new-replicas' to run\")\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.log.info(\"Start backup cluster\")\n self.backup_cluster_validate()\n self.backup_restore_validate()\n\n self.log.info(\"replicas from backup bucket: {0}\".format(self.num_replicas))\n self.log.info(\"replica in restore bucket should be {0} after restore\"\\\n .format(self.new_replicas))\n rest_r = RestConnection(self.backupset.restore_cluster_host)\n for bucket in self.buckets:\n bucket_stats = 
rest_r.get_bucket_json(bucket.name)\n if self.new_replicas != bucket_stats[\"replicaNumber\"]:\n self.fail(\"replia number in bucket {0} did change after restore\"\\\n .format(bucket.name))\n self.log.info(\"Verified replica in bucket {0}: {1}\"\\\n .format(bucket.name,\n bucket_stats[\"replicaNumber\"]))", "def become_replica(ticket, initiated_by):\n assert model.is_standalone()\n\n # On dev appserver emulate X-Appengine-Inbound-Appid header.\n headers = {'Content-Type': 'application/octet-stream'}\n protocol = 'https'\n if utils.is_local_dev_server():\n headers['X-Appengine-Inbound-Appid'] = app_identity.get_application_id()\n protocol = 'http'\n headers['X-URLFetch-Service-Id'] = utils.get_urlfetch_service_id()\n\n # Pass back the ticket for primary to verify it, tell the primary to use\n # default version hostname to talk to us.\n link_request = replication_pb2.ServiceLinkRequest()\n link_request.ticket = ticket.ticket\n link_request.replica_url = (\n '%s://%s' % (protocol, app_identity.get_default_version_hostname()))\n link_request.initiated_by = initiated_by.to_bytes()\n\n # Primary will look at X-Appengine-Inbound-Appid and compare it to what's in\n # the ticket.\n try:\n result = urlfetch.fetch(\n url='%s/auth_service/api/v1/internal/link_replica' % ticket.primary_url,\n payload=link_request.SerializeToString(),\n method='POST',\n headers=headers,\n follow_redirects=False,\n deadline=30,\n validate_certificate=True)\n except urlfetch.Error as exc:\n raise ProtocolError(\n replication_pb2.ServiceLinkResponse.TRANSPORT_ERROR,\n 'URLFetch error (%s): %s' % (exc.__class__.__name__, exc))\n\n # Protobuf based protocol is not using HTTP codes (handler always replies with\n # HTTP 200, providing error details if needed in protobuf serialized body).\n # So any other status code here means there was a transport level error.\n if result.status_code != 200:\n raise ProtocolError(\n replication_pb2.ServiceLinkResponse.TRANSPORT_ERROR,\n 'Request to the primary failed with HTTP %d.' % result.status_code)\n\n link_response = replication_pb2.ServiceLinkResponse.FromString(result.content)\n if link_response.status != replication_pb2.ServiceLinkResponse.SUCCESS:\n message = LINKING_ERRORS.get(\n link_response.status,\n 'Request to the primary failed with status %d.' % link_response.status)\n raise ProtocolError(link_response.status, message)\n\n # Become replica. Auth DB will be overwritten on a first push from Primary.\n state = model.AuthReplicationState(\n key=model.replication_state_key(),\n primary_id=ticket.primary_id,\n primary_url=ticket.primary_url)\n state.put()", "def test_read_cluster_resource_quota(self):\n pass", "def mmo_replica_state(self, mmo_connection):\n\n # https://docs.mongodb.org/manual/reference/replica-states/\n replica_states = [\n { \"id\": 0, \"name\": \"STARTUP\", \"description\": \"Not yet an active member of any set. All members start up in this state. The mongod parses the replica set configuration document while in STARTUP.\" },\n { \"id\": 1, \"name\": \"PRIMARY\", \"description\": \"The member in state primary is the only member that can accept write operations.\" },\n { \"id\": 2, \"name\": \"SECONDARY\", \"description\": \"A member in state secondary is replicating the data store. Data is available for reads, although they may be stale.\" },\n { \"id\": 3, \"name\": \"RECOVERING\", \"description\": \"Can vote. 
Members either perform startup self-checks, or transition from completing a rollback or resync.\" },\n { \"id\": 5, \"name\": \"STARTUP2\", \"description\": \"The member has joined the set and is running an initial sync.\" },\n { \"id\": 6, \"name\": \"UNKNOWN\", \"description\": \"The member's state, as seen from another member of the set, is not yet known.\" },\n { \"id\": 7, \"name\": \"ARBITER\", \"description\": \"Arbiters do not replicate data and exist solely to participate in elections.\" },\n { \"id\": 8, \"name\": \"DOWN\", \"description\": \"The member, as seen from another member of the set, is unreachable.\" },\n { \"id\": 9, \"name\": \"ROLLBACK\", \"description\": \"This member is actively performing a rollback. Data is not available for reads.\" },\n { \"id\": 10, \"name\": \"REMOVED\", \"description\": \"This member was once in a replica set but was subsequently removed.\" }\n ]\n\n if self.mmo_is_mongod(mmo_connection):\n return replica_states[mmo_connection[\"admin\"].command(\"replSetGetStatus\")[\"myState\"]]\n else:\n raise Exception(\"Not a mongod process\")", "def replicas(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"replicas\")", "def sstable_repairedset_test(self):\n cluster = self.cluster\n cluster.set_configuration_options(values={'hinted_handoff_enabled': False})\n cluster.populate(2).start()\n node1, node2 = cluster.nodelist()\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=2)', 'compaction(strategy=SizeTieredCompactionStrategy,enabled=false)', '-rate', 'threads=50'])\n\n node1.flush()\n node2.flush()\n\n node2.stop(gently=False)\n\n node2.run_sstablerepairedset(keyspace='keyspace1')\n node2.start(wait_for_binary_proto=True)\n\n initialOut1 = node1.run_sstablemetadata(keyspace='keyspace1').stdout\n initialOut2 = node2.run_sstablemetadata(keyspace='keyspace1').stdout\n\n matches = findall('(?<=Repaired at:).*', '\\n'.join([initialOut1, initialOut2]))\n debug(\"Repair timestamps are: {}\".format(matches))\n\n uniquematches = set(matches)\n matchcount = Counter(matches)\n\n self.assertGreaterEqual(len(uniquematches), 2, uniquematches)\n\n self.assertGreaterEqual(max(matchcount), 1, matchcount)\n\n self.assertIn('Repaired at: 0', '\\n'.join([initialOut1, initialOut2]))\n\n node1.stop()\n node2.stress(['write', 'n=15K', 'no-warmup', '-schema', 'replication(factor=2)'])\n node2.flush()\n node1.start(wait_for_binary_proto=True)\n\n if cluster.version() >= \"2.2\":\n node1.repair()\n else:\n node1.nodetool(\"repair -par -inc\")\n\n finalOut1 = node1.run_sstablemetadata(keyspace='keyspace1').stdout\n finalOut2 = node2.run_sstablemetadata(keyspace='keyspace1').stdout\n\n matches = findall('(?<=Repaired at:).*', '\\n'.join([finalOut1, finalOut2]))\n\n debug(matches)\n\n uniquematches = set(matches)\n matchcount = Counter(matches)\n\n self.assertGreaterEqual(len(uniquematches), 2)\n\n self.assertGreaterEqual(max(matchcount), 2)\n\n self.assertNotIn('Repaired at: 0', '\\n'.join([finalOut1, finalOut2]))", "def start_wsrep_new_cluster(self):\r\n return execute(self._start_wsrep_new_cluster)", "def test_snat_with_docker_restart_on_master(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"docker\",\n host_ips = [self.inputs.k8s_master_ip])\n time.sleep(30)\n cluster_status, 
error_nodes = ContrailStatusChecker(self.inputs).wait_till_contrail_cluster_stable()\n assert cluster_status, 'All nodes and services not up. Failure nodes are: %s' % (\n error_nodes)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def resource_type(self):\n return 'cluster'", "def test_update_nas_share_by_pool(self):\n pass", "def auto_config(self, num_replicas=1):\n _ = num_replicas\n return {}", "def failover_host(self, context, volumes, secondary_id=None, groups=None):\n volume_updates = []\n back_end_ip = None\n svc_host = volume_utils.extract_host(self.host, 'backend')\n service = objects.Service.get_by_args(context, svc_host,\n 'cinder-volume')\n\n if secondary_id and secondary_id != self.replica.backend_id:\n LOG.error(\"Kaminario driver received failover_host \"\n \"request, But backend is non replicated device\")\n raise exception.UnableToFailOver(reason=_(\"Failover requested \"\n \"on non replicated \"\n \"backend.\"))\n\n if (service.active_backend_id and\n service.active_backend_id != self.configuration.san_ip):\n self.snap_updates = []\n rep_volumes = []\n # update status for non-replicated primary volumes\n for v in volumes:\n vol_name = self.get_volume_name(v['id'])\n vol = self.client.search(\"volumes\", name=vol_name)\n if v.replication_status != K2_REP_FAILED_OVER and vol.total:\n status = 'available'\n if v.volume_attachment:\n map_rs = self.client.search(\"mappings\",\n volume=vol.hits[0])\n status = 'in-use'\n if map_rs.total:\n map_rs.hits[0].delete()\n volume_updates.append({'volume_id': v['id'],\n 'updates':\n {'status': status}})\n else:\n rep_volumes.append(v)\n\n # In-sync from secondaray array to primary array\n for v in rep_volumes:\n vol_name = self.get_volume_name(v['id'])\n vol = self.client.search(\"volumes\", name=vol_name)\n rvol_name = self.get_rep_name(vol_name)\n rvol = self.target.search(\"volumes\", name=rvol_name)\n session_name = self.get_session_name(v['id'])\n rsession_name = self.get_rep_name(session_name)\n ssn = self.target.search(\"replication/sessions\",\n name=rsession_name)\n if ssn.total:\n tgt_ssn = ssn.hits[0]\n ssn = self.client.search(\"replication/sessions\",\n name=session_name)\n if ssn.total:\n src_ssn = ssn.hits[0]\n\n if (tgt_ssn.state == 'failed_over' and\n tgt_ssn.current_role == 'target' and vol.total and src_ssn):\n map_rs = self.client.search(\"mappings\", volume=vol.hits[0])\n if map_rs.total:\n map_rs.hits[0].delete()\n tgt_ssn.state = 'in_sync'\n tgt_ssn.save()\n self._check_for_status(src_ssn, 'in_sync')\n if (rvol.total and src_ssn.state == 'in_sync' and\n src_ssn.current_role == 'target'):\n gen_no = self._create_volume_replica_user_snap(self.target,\n tgt_ssn)\n self.snap_updates.append({'tgt_ssn': tgt_ssn,\n 'gno': gen_no,\n 'stime': time.time()})\n LOG.debug(\"The target session: %s state is \"\n \"changed to in sync\", rsession_name)\n\n self._is_user_snap_sync_finished()\n\n # Delete secondary volume mappings and create snapshot\n for v in rep_volumes:\n vol_name = self.get_volume_name(v['id'])\n vol = self.client.search(\"volumes\", name=vol_name)\n rvol_name = self.get_rep_name(vol_name)\n rvol = self.target.search(\"volumes\", name=rvol_name)\n session_name = self.get_session_name(v['id'])\n rsession_name = self.get_rep_name(session_name)\n ssn = self.target.search(\"replication/sessions\",\n name=rsession_name)\n if ssn.total:\n tgt_ssn = ssn.hits[0]\n ssn = self.client.search(\"replication/sessions\",\n name=session_name)\n if ssn.total:\n 
src_ssn = ssn.hits[0]\n if (rvol.total and src_ssn.state == 'in_sync' and\n src_ssn.current_role == 'target'):\n map_rs = self.target.search(\"mappings\",\n volume=rvol.hits[0])\n if map_rs.total:\n map_rs.hits[0].delete()\n gen_no = self._create_volume_replica_user_snap(self.target,\n tgt_ssn)\n self.snap_updates.append({'tgt_ssn': tgt_ssn,\n 'gno': gen_no,\n 'stime': time.time()})\n self._is_user_snap_sync_finished()\n # changing source sessions to failed-over\n for v in rep_volumes:\n vol_name = self.get_volume_name(v['id'])\n vol = self.client.search(\"volumes\", name=vol_name)\n rvol_name = self.get_rep_name(vol_name)\n rvol = self.target.search(\"volumes\", name=rvol_name)\n session_name = self.get_session_name(v['id'])\n rsession_name = self.get_rep_name(session_name)\n ssn = self.target.search(\"replication/sessions\",\n name=rsession_name)\n if ssn.total:\n tgt_ssn = ssn.hits[0]\n ssn = self.client.search(\"replication/sessions\",\n name=session_name)\n if ssn.total:\n src_ssn = ssn.hits[0]\n if (rvol.total and src_ssn.state == 'in_sync' and\n src_ssn.current_role == 'target'):\n src_ssn.state = 'failed_over'\n src_ssn.save()\n self._check_for_status(tgt_ssn, 'suspended')\n LOG.debug(\"The target session: %s state is \"\n \"changed to failed over\", session_name)\n\n src_ssn.state = 'in_sync'\n src_ssn.save()\n LOG.debug(\"The target session: %s state is \"\n \"changed to in sync\", session_name)\n rep_status = fields.ReplicationStatus.DISABLED\n volume_updates.append({'volume_id': v['id'],\n 'updates':\n {'replication_status': rep_status}})\n\n back_end_ip = self.configuration.san_ip\n else:\n \"\"\"Failover to replication target.\"\"\"\n for v in volumes:\n vol_name = self.get_volume_name(v['id'])\n rv = self.get_rep_name(vol_name)\n if self.target.search(\"volumes\", name=rv).total:\n self._failover_volume(v)\n volume_updates.append(\n {'volume_id': v['id'],\n 'updates':\n {'replication_status': K2_REP_FAILED_OVER}})\n else:\n volume_updates.append({'volume_id': v['id'],\n 'updates': {'status': 'error', }})\n back_end_ip = self.replica.backend_id\n return back_end_ip, volume_updates, []", "def cluster_assign(images_lists, dataset):\n assert images_lists is not None\n pseudolabels = []\n image_indexes = []\n for cluster, images in enumerate(images_lists):\n image_indexes.extend(images)\n pseudolabels.extend([cluster] * len(images))\n print(image_indexes)\n print(pseudolabels)\n \n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n t = transforms.Compose([transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize])\n\n return ReassignedDataset(image_indexes, pseudolabels, dataset, t)", "def test_read_namespaced_applied_cluster_resource_quota(self):\n pass", "def rds_resource(session):\n #print type(session)\n rds = session.resource('rds')", "def test_snat_with_kubelet_restart_on_master(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"kubelet\",\n host_ips = [self.inputs.k8s_master_ip])\n time.sleep(30) # Wait timer for all kubernetes pods to stablise.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def _setup_cluster(self):\n raise NotImplementedError('Must be implemented in 
subclasses.')", "def _check_all_replicas_connected(num_replicas, gateway_port, protocol):\n exec_ids = set()\n exec_id_list = []\n for i in range(num_replicas + 1):\n id_ = _send_request(gateway_port, protocol, request_size=2)[0].text\n exec_ids.add(id_)\n exec_id_list.append(id_)\n print(exec_id_list)\n assert len(exec_ids) == num_replicas", "def test_patch_hyperflex_cluster_storage_policy(self):\n pass", "def multi_dc_replace_with_rf1_test(self):\n cluster = self.cluster\n cluster.populate([1, 1])\n cluster.start()\n node1, node2 = cluster.nodelist()\n\n node1 = cluster.nodes['node1']\n yaml_config = \"\"\"\n # Create the keyspace and table\n keyspace: keyspace1\n keyspace_definition: |\n CREATE KEYSPACE keyspace1 WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 1, 'dc2': 1};\n table: users\n table_definition:\n CREATE TABLE users (\n username text,\n first_name text,\n last_name text,\n email text,\n PRIMARY KEY(username)\n ) WITH compaction = {'class':'SizeTieredCompactionStrategy'};\n insert:\n partitions: fixed(1)\n batchtype: UNLOGGED\n queries:\n read:\n cql: select * from users where username = ?\n fields: samerow\n \"\"\"\n with tempfile.NamedTemporaryFile(mode='w+') as stress_config:\n stress_config.write(yaml_config)\n stress_config.flush()\n node1.stress(['user', 'profile=' + stress_config.name, 'n=10k', 'no-warmup',\n 'ops(insert=1)', '-rate', 'threads=50'])\n\n session = self.patient_cql_connection(node1)\n\n # change system_auth keyspace to 2 (default is 1) to avoid\n # \"Unable to find sufficient sources for streaming\" warning\n if cluster.cassandra_version() >= '2.2.0':\n session.execute(\"\"\"\n ALTER KEYSPACE system_auth\n WITH replication = {'class':'SimpleStrategy', 'replication_factor':2};\n \"\"\")\n\n # Save initial data\n stress_table = 'keyspace1.users'\n query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.TWO)\n initial_data = rows_to_list(session.execute(query))\n\n # stop node to replace\n debug(\"Stopping node to replace.\")\n node2.stop(wait_other_notice=True)\n\n node3 = new_node(cluster, data_center='dc2')\n node3.start(replace_address='127.0.0.2', wait_for_binary_proto=True)\n\n assert_bootstrap_state(self, node3, 'COMPLETED')\n\n # Check that keyspace was replicated from dc1 to dc2\n self.assertFalse(node3.grep_log(\"Unable to find sufficient sources for streaming range\"))\n\n # query should work again with node1 stopped\n node1.stop(wait_other_notice=True)\n debug(\"Verifying data on new node.\")\n session = self.patient_exclusive_cql_connection(node3)\n assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table),\n expected=initial_data,\n cl=ConsistencyLevel.LOCAL_ONE)" ]
[ "0.61823773", "0.575543", "0.571204", "0.5642952", "0.5626418", "0.55562645", "0.54538274", "0.54520047", "0.53823787", "0.5368559", "0.5275368", "0.52671695", "0.5166633", "0.51095724", "0.5102524", "0.5101288", "0.5083015", "0.50822884", "0.5079012", "0.50754446", "0.50624937", "0.50535625", "0.50457174", "0.5012547", "0.49861932", "0.49836007", "0.4980628", "0.49732393", "0.49630383", "0.49580663", "0.49437585", "0.4932733", "0.49284926", "0.49271256", "0.49066064", "0.49021676", "0.48982757", "0.48818594", "0.48706892", "0.4863104", "0.48630345", "0.48610312", "0.4831055", "0.48284152", "0.48142338", "0.48142338", "0.48142338", "0.48142338", "0.48142338", "0.4809317", "0.48016796", "0.48001873", "0.47897038", "0.47825503", "0.477279", "0.47698236", "0.47667995", "0.47607335", "0.47593898", "0.47585157", "0.4755059", "0.47504023", "0.47455022", "0.4744092", "0.4724734", "0.47227192", "0.47184616", "0.47111994", "0.4698751", "0.4698751", "0.46967766", "0.46839392", "0.4682342", "0.4682165", "0.4674512", "0.4669496", "0.46675912", "0.4664118", "0.46616518", "0.46535844", "0.46427697", "0.4642461", "0.46321437", "0.46261752", "0.46260664", "0.4623582", "0.4623437", "0.46199685", "0.4618473", "0.4613912", "0.46108255", "0.4605125", "0.45983157", "0.45973182", "0.45951763", "0.45903495", "0.4587293", "0.45825937", "0.45741287", "0.45738307", "0.4572543" ]
0.0
-1
This operation is applicable only to replica set instances and sharded cluster instances.
async def modify_dbinstance_network_type_async( self, request: dds_20151201_models.ModifyDBInstanceNetworkTypeRequest, ) -> dds_20151201_models.ModifyDBInstanceNetworkTypeResponse: runtime = util_models.RuntimeOptions() return await self.modify_dbinstance_network_type_with_options_async(request, runtime)
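The row above pairs the note that the operation applies only to replica set and sharded cluster instances with the async wrapper modify_dbinstance_network_type_async. For readers unfamiliar with the call shape, the following is a minimal, hypothetical usage sketch; the import alias, the client variable, and the request field names (dbinstance_id, network_type) and their placeholder values are assumptions for illustration and are not taken from this row.

import asyncio

# Assumed import path and alias; the installed SDK package layout may differ.
from alibabacloud_dds20151201 import models as dds_20151201_models


async def switch_network_type(client):
    # Hypothetical request fields and placeholder values, shown only to illustrate the call shape.
    request = dds_20151201_models.ModifyDBInstanceNetworkTypeRequest(
        dbinstance_id='dds-bpxxxxxxxxxxxx',  # placeholder instance ID
        network_type='VPC',                  # assumed target network type
    )
    # The wrapper constructs default RuntimeOptions and awaits
    # modify_dbinstance_network_type_with_options_async under the hood.
    return await client.modify_dbinstance_network_type_async(request)

# Example invocation (assumes `client` is an already-initialized DDS client):
# asyncio.run(switch_network_type(client))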
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def replicaof(self, *args, **kwargs) -> NoReturn:\n raise RedisClusterException(\"REPLICAOF is not supported in cluster mode\")", "def modifyMastersWithMultipleInstances(self):\n # Nothing to do\n pass", "def cluster(self):\n assert False", "def replica_set_name(self):\n ...", "def test_redis_increase_replica_count_usual_case():", "def test_patch_cluster_role(self):\n pass", "def mmo_execute_on_secondary_or_primary(self, mmo_connection, command, replicaset=\"all\", first_available_only=False): # TODO add execution database?\n cluster_command_output = []\n replsets_completed = set()\n all_replsets = set()\n repl_hosts = self.mmo_shard_servers(mmo_connection)\n for doc in repl_hosts:\n hostname, port, shard = doc[\"hostname\"], doc[\"port\"], doc[\"shard\"]\n all_replsets.add(shard)\n auth_dic = self.mmo_get_auth_details_from_connection(mmo_connection)\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"],\n auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"SECONDARY\" \\\n and (replicaset == \"all\" or replicaset == shard) \\\n and shard not in replsets_completed:\n secondary_found = True\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append(\n {\"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": command_output})\n #if first_available_only:\n replsets_completed.add(shard)\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": shard,\n \"command_output\": {\"Error\": \"This mongod process is not available\"}})\n else:\n raise excep\n # This is the case where there are no secondaries\n for missing_shard in (all_replsets^replsets_completed):\n for doc in repl_hosts:\n hostname, port, shard = doc[\"hostname\"], doc[\"port\"], doc[\"shard\"]\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"],\n auth_dic[\"authentication_database\"])\n if shard == missing_shard and self.mmo_replica_state(c)[\"name\"] == \"PRIMARY\":\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": command_output})\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": shard,\n \"command_output\": {\"Error\": \"This mongod process is not available\"}})\n else:\n raise excep\n return cluster_command_output", "def is_replica(self):\n if (\n self.replica\n and self.aip\n and not self.deleted\n and not self.sip\n and not self.dip\n ):\n return True\n return False", "def test_replace_cluster_role(self):\n pass", "def test_base_replica_repair_with_contention(self):\n self._base_replica_repair_test(fail_mv_lock=True)", "def mmo_execute_on_secondaries(self, mmo_connection, command, replicaset=\"all\", first_available_only=False): # TODO add execution database?\n cluster_command_output = []\n replsets_completed = []\n\n auth_dic = self.mmo_get_auth_details_from_connection(mmo_connection)\n\n if replicaset == self.config_server_repl_name: # Config server replset\n for doc in self.mmo_config_servers(mmo_connection):\n hostname, port = doc[\"hostname\"], doc[\"port\"]\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"],auth_dic[\"authentication_database\"])\n if 
self.mmo_replica_state(c)[\"name\"] == \"SECONDARY\":\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": command_output})\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": {\"Error\": \"mongod process is not up\"}})\n else:\n raise excep\n else:\n for doc in self.mmo_shard_servers(mmo_connection):\n hostname, port, shard = doc[\"hostname\"], doc[\"port\"], doc[\"shard\"]\n try:\n\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"], auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"SECONDARY\" \\\n and (replicaset == \"all\" or replicaset == shard)\\\n and shard not in replsets_completed:\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({ \"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": command_output })\n if first_available_only:\n replsets_completed.append(shard)\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": {\"Error\": \"This mongod process is not available\"}})\n else:\n raise excep\n return cluster_command_output", "def mmo_execute_on_primaries(self, mmo_connection, command, replicaset=\"all\"): # TODO add execution database?\n\n auth_dic = self.mmo_get_auth_details_from_connection(mmo_connection)\n\n cluster_command_output = []\n if replicaset == self.config_server_repl_name: # Config server replset\n for doc in self.mmo_config_servers(mmo_connection):\n hostname, port = doc[\"hostname\"], doc[\"port\"]\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"],auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"PRIMARY\":\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": command_output})\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({\"hostname\": hostname, \"port\": port, \"shard\": replicaset, \"command_output\": {\"Error\": \"mongod process is not up\"}})\n else:\n raise excep\n else:\n for doc in self.mmo_shard_servers(mmo_connection):\n hostname, port, shard = doc[\"hostname\"], doc[\"port\"], doc[\"shard\"]\n\n try:\n c = self.mmo_connect_mongod(hostname, port, auth_dic[\"username\"], auth_dic[\"password\"], auth_dic[\"authentication_database\"])\n if self.mmo_replica_state(c)[\"name\"] == \"PRIMARY\" and (replicaset == \"all\" or replicaset == shard):\n command_output = c[\"admin\"].command(command)\n cluster_command_output.append({ \"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": command_output })\n except Exception as excep:\n if str(excep) == \"mongod process is not up\":\n cluster_command_output.append({ \"hostname\": hostname, \"port\": port, \"shard\": shard, \"command_output\": { \"Error\": \"mongod process is not up\" } })\n else:\n raise excep\n return cluster_command_output", "def prepare_replica_for_exchange(self, replica):\n pass", "def crash_safe_replication(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"crash_safe_replication\")", "def 
test_read_cluster_role(self):\n pass", "def test_replace_cluster_policy(self):\n pass", "def test_patch_cluster_policy(self):\n pass", "def dvs_instances_batch_mix_sg(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n self.show_step(2)\n net_1 = os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network is created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(3)\n self.show_step(4)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n self.icmp[\"security_group_rule\"][\"security_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"remote_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n self.icmp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n\n for sg in [sg1, sg2]:\n self.tcp[\"security_group_rule\"][\"security_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"remote_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n self.tcp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n\n # Add rules for ssh and ping\n os_conn.goodbye_security()\n _s_groups = os_conn.neutron.list_security_groups()['security_groups']\n _srv_tenant_id = os_conn.get_tenant(SERVTEST_TENANT).id\n default_sg = [sg for sg in _s_groups\n if sg['tenant_id'] == _srv_tenant_id and\n sg['name'] == 'default'][0]\n\n for step in (5, 9):\n self.show_step(step) # step 5, 9\n step += 1\n self.show_step(step) # step 6, 10\n step += 1\n\n openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=vm_count,\n security_groups=[sg1.name, sg2.name, default_sg['name']])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(step) # step 7, 11\n step += 1\n\n instances = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, instances)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair,\n timeout=60 * 5,\n interval=10)\n\n self.show_step(step) # step 8, 12\n step += 1\n\n for instance in instances:\n os_conn.nova.servers.delete(instance)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(instance)", "def upsert(self):\n\n if 
self.cluster:\n self.cluster.upsert()\n else:\n super().upsert()", "def delete_cluster(self):", "def use_read_replica_if_available(queryset):\r\n return queryset.using(\"read_replica\") if \"read_replica\" in settings.DATABASES else queryset", "def failover_replica(self) -> 'outputs.InstanceFailoverReplicaResponse':\n return pulumi.get(self, \"failover_replica\")", "def test_patch_hyperflex_cluster(self):\n pass", "def replica_catalog(train_tweets_path, val_tweets_path, test_tweets_path, dataset_images, EMBEDDING_BASE_PATH):\n rc = ReplicaCatalog()\n http_location = \"https://workflow.isi.edu/Panorama/Data/CrisisComputing\"\n\n # list of input file objects\n input_images = []\n\n # Adding Images to the replica catalogue\n for image_path in dataset_images:\n name = image_path[1].split(\"/\")[-1]\n image_file = File(name)\n input_images.append(image_file)\n \n path_split = image_path[0].split(\"/\")\n #rc.add_replica(\"local\", image_file, image_path[0])\n rc.add_replica(\"isi\", image_file, os.path.join(http_location, path_split[-2], path_split[-1]))\n\n \n glove_embeddings = File('glove.twitter.27B.200d.txt')\n \n # File objects for train, val and test tweets csv\n train_tweets_name = File(train_tweets_path.split('/')[-1])\n val_tweets_name = File(val_tweets_path.split('/')[-1])\n test_tweets_name = File(test_tweets_path.split('/')[-1])\n \n #rc.add_replica(\"local\", train_tweets_name, train_tweets_path)\n #rc.add_replica(\"local\", val_tweets_name, val_tweets_path)\n #rc.add_replica(\"local\", test_tweets_name, test_tweets_path)\n #rc.add_replica(\"local\", glove_embeddings, os.path.join(os.getcwd(), os.path.join(EMBEDDING_BASE_PATH, GLOVE_EMBEDDING_FILE))) \n \n path_split = train_tweets_path.split('/')\n rc.add_replica(\"isi\", train_tweets_name, os.path.join(http_location, path_split[-2], path_split[-1]))\n path_split = val_tweets_path.split('/')\n rc.add_replica(\"isi\", val_tweets_name, os.path.join(http_location, path_split[-2], path_split[-1]))\n path_split = test_tweets_path.split('/')\n rc.add_replica(\"isi\", test_tweets_name, os.path.join(http_location, path_split[-2], path_split[-1]))\n \n rc.add_replica(\"isi\", glove_embeddings, os.path.join(http_location, \"glove_twitter\", GLOVE_EMBEDDING_FILE)) \n rc.write()\n\n return input_images, train_tweets_name, val_tweets_name, test_tweets_name, glove_embeddings", "def replicas(self, replicas):\n\n self._replicas = replicas", "def test_replace_cluster_resource_quota(self):\n pass", "def index(self):\n\n if self.cluster:\n self.cluster.index()\n else:\n super().index()", "def cluster_replicate(\n self, target_nodes: \"TargetNodesT\", node_id: str\n ) -> ResponseT:\n return self.execute_command(\n \"CLUSTER REPLICATE\", node_id, target_nodes=target_nodes\n )", "def replica(self) -> str:\n return pulumi.get(self, \"replica\")", "def share_replica_delete(context, share_replica_id, session=None,\n need_to_update_usages=True):\n session = session or get_session()\n\n share_instance_delete(context, share_replica_id, session=session,\n need_to_update_usages=need_to_update_usages)", "def test_snat_with_master_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def 
test_update_hyperflex_cluster(self):\n pass", "def replication(self):\n return self._replication", "def test_replace_cluster_resource_quota_status(self):\n pass", "def test_read_cluster_policy(self):\n pass", "def _mix_replicas(self):\n logger.debug(\"Mixing replicas (does nothing for MultiStateSampler)...\")\n\n # Reset storage to keep track of swap attempts this iteration.\n self._n_accepted_matrix[:, :] = 0\n self._n_proposed_matrix[:, :] = 0\n\n # Determine fraction of swaps accepted this iteration.\n n_swaps_proposed = self._n_proposed_matrix.sum()\n n_swaps_accepted = self._n_accepted_matrix.sum()\n swap_fraction_accepted = 0.0\n if n_swaps_proposed > 0:\n swap_fraction_accepted = n_swaps_accepted / n_swaps_proposed # Python 3 uses true division for /\n logger.debug(\"Accepted {}/{} attempted swaps ({:.1f}%)\".format(n_swaps_accepted, n_swaps_proposed,\n swap_fraction_accepted * 100.0))\n return self._replica_thermodynamic_states", "def test_patch_cluster_resource_quota(self):\n pass", "def is_replicated():\n if tf.distribute.has_strategy() and tf.distribute.get_replica_context():\n return tf.distribute.get_strategy().num_replicas_in_sync > 1\n return get_tf_replicator() is not None or is_tpu_replicated()", "def share_replicas_get_all(context, with_share_data=False,\n with_share_server=True, session=None):\n session = session or get_session()\n\n result = _share_replica_get_with_filters(\n context, with_share_server=with_share_server, session=session).all()\n\n if with_share_data:\n result = _set_instances_share_data(context, result, session)\n\n return result", "def check_smartstack_replication_for_instance(\n instance_config,\n expected_count,\n smartstack_replication_checker,\n):\n\n crit_threshold = instance_config.get_replication_crit_percentage()\n\n log.info('Checking instance %s in smartstack', instance_config.job_id)\n smartstack_replication_info = \\\n smartstack_replication_checker.get_replication_for_instance(instance_config)\n\n log.debug('Got smartstack replication info for %s: %s' %\n (instance_config.job_id, smartstack_replication_info))\n\n if len(smartstack_replication_info) == 0:\n status = pysensu_yelp.Status.CRITICAL\n output = (\n 'Service %s has no Smartstack replication info. 
Make sure the discover key in your smartstack.yaml '\n 'is valid!\\n'\n ) % instance_config.job_id\n log.error(output)\n else:\n expected_count_per_location = int(expected_count / len(smartstack_replication_info))\n output = ''\n output_critical = ''\n output_ok = ''\n under_replication_per_location = []\n\n for location, available_backends in sorted(smartstack_replication_info.items()):\n num_available_in_location = available_backends.get(instance_config.job_id, 0)\n under_replicated, ratio = is_under_replicated(\n num_available_in_location, expected_count_per_location, crit_threshold,\n )\n if under_replicated:\n output_critical += '- Service %s has %d out of %d expected instances in %s (CRITICAL: %d%%)\\n' % (\n instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,\n )\n else:\n output_ok += '- Service %s has %d out of %d expected instances in %s (OK: %d%%)\\n' % (\n instance_config.job_id, num_available_in_location, expected_count_per_location, location, ratio,\n )\n under_replication_per_location.append(under_replicated)\n\n output += output_critical\n if output_critical and output_ok:\n output += '\\n\\n'\n output += 'The following locations are OK:\\n'\n output += output_ok\n\n if any(under_replication_per_location):\n status = pysensu_yelp.Status.CRITICAL\n output += (\n \"\\n\\n\"\n \"What this alert means:\\n\"\n \"\\n\"\n \" This replication alert means that a SmartStack powered loadbalancer (haproxy)\\n\"\n \" doesn't have enough healthy backends. Not having enough healthy backends\\n\"\n \" means that clients of that service will get 503s (http) or connection refused\\n\"\n \" (tcp) when trying to connect to it.\\n\"\n \"\\n\"\n \"Reasons this might be happening:\\n\"\n \"\\n\"\n \" The service may simply not have enough copies or it could simply be\\n\"\n \" unhealthy in that location. There also may not be enough resources\\n\"\n \" in the cluster to support the requested instance count.\\n\"\n \"\\n\"\n \"Things you can do:\\n\"\n \"\\n\"\n \" * You can view the logs for the job with:\\n\"\n \" paasta logs -s %(service)s -i %(instance)s -c %(cluster)s\\n\"\n \"\\n\"\n \" * Fix the cause of the unhealthy service. 
Try running:\\n\"\n \"\\n\"\n \" paasta status -s %(service)s -i %(instance)s -c %(cluster)s -vv\\n\"\n \"\\n\"\n \" * Widen SmartStack discovery settings\\n\"\n \" * Increase the instance count\\n\"\n \"\\n\"\n ) % {\n 'service': instance_config.service,\n 'instance': instance_config.instance,\n 'cluster': instance_config.cluster,\n }\n log.error(output)\n else:\n status = pysensu_yelp.Status.OK\n log.info(output)\n send_event(instance_config=instance_config, status=status, output=output)", "def crash_safe_replication_enabled(self) -> bool:\n return pulumi.get(self, \"crash_safe_replication_enabled\")", "def test_create_cluster_role(self):\n pass", "def share_replica_update(context, share_replica_id, values,\n with_share_data=False, session=None):\n session = session or get_session()\n\n with session.begin():\n _ensure_availability_zone_exists(context, values, session,\n strict=False)\n updated_share_replica = _share_instance_update(\n context, share_replica_id, values, session=session)\n\n if with_share_data:\n updated_share_replica = _set_instances_share_data(\n context, updated_share_replica, session)[0]\n\n return updated_share_replica", "def test_index_nas_shares_by_pool(self):\n pass", "def replicas(self) -> int:\n return pulumi.get(self, \"replicas\")", "def replicas(self) -> int:\n return pulumi.get(self, \"replicas\")", "def replicas(self) -> int:\n return pulumi.get(self, \"replicas\")", "def replicas(self) -> int:\n return pulumi.get(self, \"replicas\")", "def replicas(self) -> int:\n return pulumi.get(self, \"replicas\")", "def test_patch_cluster_resource_quota_status(self):\n pass", "def mmo_replicaset_conf(self, mmo_connection):\n command = {\"replSetGetConfig\" : 1}\n return self.mmo_execute_on_primaries(mmo_connection, command)", "def run(self, image, replicas, scale_replicas, command=None,\n status_wait=True):\n namespace = self.choose_namespace()\n\n name = self.client.create_statefulset(\n namespace=namespace,\n replicas=replicas,\n image=image,\n command=command,\n status_wait=status_wait\n )\n\n self.client.scale_statefulset(\n name,\n namespace=namespace,\n replicas=scale_replicas,\n status_wait=status_wait\n )\n\n self.client.scale_statefulset(\n name,\n namespace=namespace,\n replicas=replicas,\n status_wait=status_wait\n )\n\n self.client.delete_statefulset(\n name=name,\n namespace=namespace,\n status_wait=status_wait\n )", "def test_list_cluster_role(self):\n pass", "def failover_replica(self) -> pulumi.Output['outputs.InstanceFailoverReplicaResponse']:\n return pulumi.get(self, \"failover_replica\")", "def _base_replica_repair_test(self, fail_mv_lock=False):\n\n self.prepare(rf=3)\n node1, node2, node3 = self.cluster.nodelist()\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t \"\n \"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)\"))\n\n session.cluster.control_connection.wait_for_schema_agreement()\n\n logger.debug('Write initial data')\n for i in range(1000):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)\".format(v=i))\n\n self._replay_batchlogs()\n\n logger.debug('Verify the data in the MV with CL=ALL')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0],\n cl=ConsistencyLevel.ALL\n )\n\n logger.debug('Shutdown node1')\n node1.stop(wait_other_notice=True)\n 
logger.debug('Delete node1 data')\n node1.clear(clear_all=True)\n\n jvm_args = []\n if fail_mv_lock:\n if self.cluster.version() >= LooseVersion('3.10'): # CASSANDRA-10134\n jvm_args = ['-Dcassandra.allow_unsafe_replace=true', '-Dcassandra.replace_address={}'.format(node1.address())]\n jvm_args.append(\"-Dcassandra.test.fail_mv_locks_count=1000\")\n # this should not make Keyspace.apply throw WTE on failure to acquire lock\n node1.set_configuration_options(values={'write_request_timeout_in_ms': 100})\n logger.debug('Restarting node1 with jvm_args={}'.format(jvm_args))\n node1.start(wait_for_binary_proto=True, jvm_args=jvm_args)\n logger.debug('Shutdown node2 and node3')\n node2.stop(wait_other_notice=True)\n node3.stop(wait_other_notice=True)\n\n session = self.patient_exclusive_cql_connection(node1)\n session.execute('USE ks')\n\n logger.debug('Verify that there is no data on node1')\n for i in range(1000):\n assert_none(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i)\n )\n\n logger.debug('Restarting node2 and node3')\n node2.start(wait_for_binary_proto=True)\n node3.start(wait_for_binary_proto=True)\n\n # Just repair the base replica\n logger.debug('Starting repair on node1')\n node1.nodetool(\"repair ks t\")\n\n logger.debug('Verify data with cl=ALL')\n for i in range(1000):\n assert_one(\n session,\n \"SELECT * FROM t_by_v WHERE v = {}\".format(i),\n [i, i, 'a', 3.0]\n )", "def get_replica_ips(self):\n return self.membership", "def initialize_replicas(self):\n try:\n self.replicas+1\n except:\n print \"Ensemble MD Toolkit Error: Number of replicas must be \\\n defined for pattern ReplicaExchange!\"\n raise\n\n\n replicas = []\n N = self.replicas\n for k in range(N):\n r = ReplicaP(k)\n replicas.append(r)\n\n return replicas", "def run(self):\n client = self._get_client()\n metadata = self._metadata\n\n if str(metadata.get('cluster')) == str(self._cluster_id):\n if str(self._uuid) == str(self._node_id):\n #hypervisor_hostname = client.servers.find(\n # id=self._uuid).to_dict()['OS-EXT-SRV-ATTR:hypervisor_hostname']\n hypervisor_hostname = client.servers.find(\n id=self._uuid).to_dict()['OS-EXT-SRV-ATTR:host']\n data = {\n 'migrate': True,\n 'uuid': self._uuid,\n 'hypervisor_hostname': hypervisor_hostname,\n 'flavor_id': self._flavor\n }\n return Result(data)\n\n data = {\n 'migrate': False,\n 'uuid': self._uuid,\n 'hypervisor_hostname': '',\n 'flavor_id':self._flavor\n }\n return Result(data)", "def replace_with_insufficient_replicas_test(self):\n debug(\"Starting cluster with 3 nodes.\")\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n if DISABLE_VNODES:\n num_tokens = 1\n else:\n # a little hacky but grep_log returns the whole line...\n num_tokens = int(node3.get_conf_option('num_tokens'))\n\n debug(\"testing with num_tokens: {}\".format(num_tokens))\n\n debug(\"Inserting Data...\")\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=2)'])\n\n # stop node to replace\n debug(\"Stopping node to replace.\")\n node3.stop(wait_other_notice=True)\n\n # stop other replica\n debug(\"Stopping node2 (other replica)\")\n node2.stop(wait_other_notice=True)\n\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n cluster.add(node4, False)\n 
node4.start(replace_address='127.0.0.3', wait_for_binary_proto=False, wait_other_notice=False)\n\n # replace should fail due to insufficient replicas\n node4.watch_log_for(\"Unable to find sufficient sources for streaming range\")\n assert_not_running(node4)", "def dvs_instances_one_group(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(2)\n self.show_step(3)\n openstack.create_instances(os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n vm_count=vm_count,\n security_groups=[security_group.name])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(4)\n srv_list = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, srv_list)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair)\n\n self.show_step(5)\n for srv in srv_list:\n os_conn.nova.servers.delete(srv)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(srv)", "def scale_app(self, name, replicas):\n raise NotImplementedError", "def check_replica_primary(con,host, warning, critical,perf_data):\n if warning is None and critical is None:\n warning=1\n warning=warning or 2\n critical=critical or 2\n\n primary_status=0\n message=\"Primary server has not changed\"\n db=con[\"nagios\"]\n data=get_server_status(con)\n current_primary=data['repl'].get('primary')\n saved_primary=get_stored_primary_server_name(db)\n if current_primary is None:\n current_primary = \"None\"\n if saved_primary is None:\n saved_primary = \"None\"\n if current_primary != saved_primary:\n last_primary_server_record = {\"server\": current_primary}\n db.last_primary_server.update({\"_id\": \"last_primary\"}, {\"$set\" : last_primary_server_record} , upsert=True, safe=True)\n message = \"Primary server has changed from %s to %s\" % (saved_primary, current_primary)\n primary_status=1\n return check_levels(primary_status,warning,critical,message)", "def test_delete_collection_cluster_policy(self):\n pass", "def _load_cluster(self):", "def replica_configuration(self) -> 'outputs.ReplicaConfigurationResponse':\n return pulumi.get(self, \"replica_configuration\")", "def test_delete_cluster_role(self):\n pass", "def _standby_clone():\n # manualy:\n # $ mkdir -p /var/lib/postgresql/9.1/testscluster/\n # $ rsync -avz --rsh='ssh -p2222' root@12.34.56.789:/var/lib/postgresql/9.1/testscluster/ /var/lib/postgresql/9.1/testscluster/\n\n with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):\n puts(green('Start cloning the master'))\n repmgr_clone_command = 'repmgr -D %(slave_pgdata_path)s -d %(sync_db)s -p %(cluster_port)s -U %(sync_user)s -R postgres --verbose standby clone %(pgmaster_ip)s' % env\n puts(green(repmgr_clone_command))\n puts(\"-\" * 40)\n res = sudo(repmgr_clone_command, user='postgres')\n if 'Can not connect to the 
remote host' in res or 'Connection to database failed' in res:\n puts(\"-\" * 40)\n puts(green(repmgr_clone_command))\n puts(\"-\" * 40)\n puts(\"Master server is %s reachable.\" % red(\"NOT\"))\n puts(\"%s you can try to CLONE the slave manually [%s]:\" % (green(\"BUT\"), red(\"at your own risk\")))\n puts(\"On the slave server:\")\n puts(\"$ sudo -u postgres rsync -avz --rsh='ssh -p%(master_ssh_port)s' postgres@%(pgmaster_ip)s:%(master_pgdata_path)s %(slave_pgdata_path)s --exclude=pg_xlog* --exclude=pg_control --exclude=*.pid\" % env)\n puts(\"Here:\")\n puts(\"$ fab <cluster_task_name> finish_configuring_slave\")\n abort(\"STOP...\")", "def num_replicas_per_shard(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"num_replicas_per_shard\")", "def num_replicas_per_shard(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"num_replicas_per_shard\")", "def num_replicas_per_shard(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"num_replicas_per_shard\")", "def main():\n rs = redshift(config_file=CONFIG_FILENAME)\n \n # check if cluster already available\n try:\n clust_avail = check_available(rs)\n except rs_client.exceptions.ClusterNotFoundFault:\n clust_avail = False\n\n # if cluster not available, create it\n if not clust_avail:\n create_cluster(rs) \n \n print(f'Cluster is available. Cluster information: \\n{rs.get_cluster_info()}')", "def test_modify_storage_group_srdf_set_consistency_enable(self):\n if not self.run_consistency_enable_check():\n self.skipTest(\n 'Skip test_modify_storage_group_srdf_set_consistency_enable '\n 'This fix is in V9.2.1.7')\n sg_name, srdf_group_number, local_volume, remote_volume = (\n self.create_rdf_sg())\n self.replication.modify_storage_group_srdf(\n storage_group_id=sg_name, action='setmode',\n srdf_group_number=srdf_group_number,\n options={'setMode': {'mode': 'Asynchronous'}})\n status = self.replication.modify_storage_group_srdf(\n storage_group_id=sg_name, srdf_group_number=srdf_group_number,\n action=\"EnableConsistency\")\n self.assertEqual('Enabled', status.get('consistency_protection'))\n disable_status = self.replication.modify_storage_group_srdf(\n storage_group_id=sg_name, srdf_group_number=srdf_group_number,\n action=\"DisableConsistency\")\n self.assertEqual(\n 'Disabled', disable_status.get('consistency_protection'))", "def test_rollback(self):\n os.system('rm config.txt; touch config.txt')\n test_oplog, primary_conn, mongos, solr = self.get_new_oplog()\n\n if not start_cluster():\n self.fail('Cluster could not be started successfully!')\n\n solr = DocManager()\n test_oplog.doc_manager = solr\n solr._delete() # equivalent to solr.delete(q='*: *')\n\n mongos['test']['test'].remove({})\n mongos['test']['test'].insert( \n {'_id': ObjectId('4ff74db3f646462b38000001'),\n 'name': 'paulie'},\n safe=True\n )\n while (mongos['test']['test'].find().count() != 1):\n time.sleep(1)\n cutoff_ts = test_oplog.get_last_oplog_timestamp()\n\n first_doc = {'name': 'paulie', '_ts': bson_ts_to_long(cutoff_ts),\n 'ns': 'test.test',\n '_id': ObjectId('4ff74db3f646462b38000001')}\n\n #try kill one, try restarting\n kill_mongo_proc(primary_conn.host, PORTS_ONE['PRIMARY'])\n\n new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))\n admin = new_primary_conn['admin']\n while admin.command(\"isMaster\")['ismaster'] is False:\n time.sleep(1)\n time.sleep(5)\n count = 0\n while True:\n try:\n mongos['test']['test'].insert({\n '_id': ObjectId('4ff74db3f646462b38000002'),\n 'name': 'paul'}, \n safe=True)\n break\n 
except OperationFailure:\n count += 1\n if count > 60:\n self.fail('Call to insert doc failed too many times')\n time.sleep(1)\n continue\n while (mongos['test']['test'].find().count() != 2):\n time.sleep(1)\n kill_mongo_proc(primary_conn.host, PORTS_ONE['SECONDARY'])\n start_mongo_proc(PORTS_ONE['PRIMARY'], \"demo-repl\", \"/replset1a\",\n \"/replset1a.log\", None)\n\n #wait for master to be established\n while primary_conn['admin'].command(\"isMaster\")['ismaster'] is False:\n time.sleep(1)\n\n start_mongo_proc(PORTS_ONE['SECONDARY'], \"demo-repl\", \"/replset1b\",\n \"/replset1b.log\", None)\n\n #wait for secondary to be established\n admin = new_primary_conn['admin']\n while admin.command(\"replSetGetStatus\")['myState'] != 2:\n time.sleep(1)\n while retry_until_ok(mongos['test']['test'].find().count) != 1:\n time.sleep(1)\n\n self.assertEqual(str(new_primary_conn.port), PORTS_ONE['SECONDARY'])\n self.assertEqual(str(primary_conn.port), PORTS_ONE['PRIMARY'])\n\n last_ts = test_oplog.get_last_oplog_timestamp()\n second_doc = {'name': 'paul', '_ts': bson_ts_to_long(last_ts),\n 'ns': 'test.test', \n '_id': ObjectId('4ff74db3f646462b38000002')}\n\n test_oplog.doc_manager.upsert(first_doc)\n test_oplog.doc_manager.upsert(second_doc)\n\n test_oplog.rollback()\n test_oplog.doc_manager.commit()\n results = solr._search()\n\n assert(len(results) == 1)\n\n self.assertEqual(results[0]['name'], 'paulie')\n self.assertTrue(results[0]['_ts'] <= bson_ts_to_long(cutoff_ts))\n\n #test_oplog.join()", "def modify_rds_security_group(payload):\n # version = payload.get(\"version\")\n rds_ids = payload.pop(\"resource_id\")\n sg_ids = payload.pop(\"sg_id\")\n apply_action = \"GrantSecurityGroup\"\n remove_action = \"RemoveSecurityGroup\"\n check_instance_security_action = \"DescribeSecurityGroupByInstance\"\n version = payload.get(\"version\")\n result_data = {}\n\n succ, resp = get_ha_rds_backend_instance_info(payload)\n if not succ:\n return resp\n rds_2_instance = {rds: instance for instance, rds in resp.items()}\n\n if len(sg_ids) > 1:\n return console_response(\n SecurityErrorCode.ONE_SECURITY_PER_INSTANCE_ERROR, \"modify failed\")\n sg_id = sg_ids[0]\n\n code = 0\n msg = 'Success'\n for rds_id in rds_ids:\n sg_results_succ = []\n sg = RdsSecurityGroupModel.get_security_by_id(sg_id=sg_id)\n sg_uuid = sg.uuid\n visible_rds_record = RdsModel.get_rds_by_id(rds_id=rds_id)\n if visible_rds_record.rds_type == 'ha':\n rds_group = visible_rds_record.rds_group\n rds_records = RdsModel.get_rds_records_by_group(rds_group)\n else:\n rds_records = []\n for rds_record in rds_records:\n rds_ins_uuid = rds_2_instance.get(rds_record.uuid)\n\n payload.update(\n {\"action\": check_instance_security_action, \"version\": version,\n \"server\": rds_ins_uuid})\n # check_resp = api.get(payload=payload, timeout=10)\n check_resp = api.get(payload=payload)\n if check_resp.get(\"code\") != 0:\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = check_resp.get(\"msg\")\n continue\n\n # if the instance already has a security group, remove it\n if check_resp[\"data\"][\"total_count\"] > 0:\n old_sg_uuid = check_resp[\"data\"][\"ret_set\"][0][\"id\"]\n payload.update({\"action\": remove_action, \"version\": version,\n \"server\": rds_ins_uuid,\n \"security_group\": old_sg_uuid})\n # remove_resp = api.get(payload=payload, timeout=10)\n remove_resp = api.get(payload=payload)\n if remove_resp.get(\"code\") != 0:\n logger.debug(\"the resp of removing the old securty group is:\" +\n str(remove_resp))\n code = 
CommonErrorCode.REQUEST_API_ERROR\n msg = remove_resp.get(\"msg\")\n continue\n else:\n rds_record.sg = None\n rds_record.save()\n\n # grant the new security group to the instance\n payload.update({\"action\": apply_action, \"version\": version,\n \"server\": rds_ins_uuid, \"security_group\": sg_uuid})\n # grant_resp = api.get(payload=payload, timeout=10)\n grant_resp = api.get(payload=payload)\n\n if grant_resp.get(\"code\") != 0:\n logger.debug(\"the resp of granting the new securty group is:\" +\n str(grant_resp))\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = grant_resp.get(\"msg\")\n logger.error(\n \"security_group with sg_id \" + sg_id +\n \" cannot apply to rds with rds_id \" + rds_id)\n else:\n try:\n rds_record.sg = RdsSecurityGroupModel.\\\n get_security_by_id(sg_id)\n rds_record.save()\n except Exception as exp:\n logger.error(\"cannot save grant sg to rds to db, {}\".\n format(exp.message))\n else:\n sg_results_succ.append(sg_id)\n result_data.update({rds_id: sg_results_succ})\n resp = console_response(code, msg, len(result_data.keys()), [result_data])\n return resp", "def delete_replicas(self, target_count):\n while len(self.replicas) > target_count:\n self.remove_replica(self.replicas[-1])", "def _report_replica_set_state(self, state, clean_server_name, replset_name, agentConfig):\n last_state = self._last_state_by_server.get(clean_server_name, -1)\n self._last_state_by_server[clean_server_name] = state\n if last_state != state and last_state != -1:\n return self.create_event(last_state, state, clean_server_name, replset_name, agentConfig)", "def test_list_cluster_policy(self):\n pass", "def enable_replicate(self, req, id, body):\n LOG.info(_LI(\"Enable volume's replicate, volume_id: %s\"), id)\n context = req.environ['sgservice.context']\n volume = self.service_api.get(context, id)\n replicate_info = self.service_api.enable_replicate(context, volume)\n return self._view_builder.action_summary(req, replicate_info)", "def cluster_replicas(\n self, node_id: str, target_nodes: Optional[\"TargetNodesT\"] = None\n ) -> ResponseT:\n return self.execute_command(\n \"CLUSTER REPLICAS\", node_id, target_nodes=target_nodes\n )", "def test_snat_with_nodes_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n for node in self.inputs.k8s_slave_ips:\n self.inputs.reboot(node)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def replica_names(self) -> Sequence[str]:\n return pulumi.get(self, \"replica_names\")", "def test_cbrestoremgr_should_not_change_replica_count_in_restore_bucket(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=10000)\n if not self.new_replicas:\n self.fail(\"This test needs to pass param 'new-replicas' to run\")\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.log.info(\"Start backup cluster\")\n self.backup_cluster_validate()\n self.backup_restore_validate()\n\n self.log.info(\"replicas from backup bucket: {0}\".format(self.num_replicas))\n self.log.info(\"replica in restore bucket should be {0} after restore\"\\\n .format(self.new_replicas))\n rest_r = RestConnection(self.backupset.restore_cluster_host)\n for bucket in self.buckets:\n bucket_stats = 
rest_r.get_bucket_json(bucket.name)\n if self.new_replicas != bucket_stats[\"replicaNumber\"]:\n self.fail(\"replia number in bucket {0} did change after restore\"\\\n .format(bucket.name))\n self.log.info(\"Verified replica in bucket {0}: {1}\"\\\n .format(bucket.name,\n bucket_stats[\"replicaNumber\"]))", "def become_replica(ticket, initiated_by):\n assert model.is_standalone()\n\n # On dev appserver emulate X-Appengine-Inbound-Appid header.\n headers = {'Content-Type': 'application/octet-stream'}\n protocol = 'https'\n if utils.is_local_dev_server():\n headers['X-Appengine-Inbound-Appid'] = app_identity.get_application_id()\n protocol = 'http'\n headers['X-URLFetch-Service-Id'] = utils.get_urlfetch_service_id()\n\n # Pass back the ticket for primary to verify it, tell the primary to use\n # default version hostname to talk to us.\n link_request = replication_pb2.ServiceLinkRequest()\n link_request.ticket = ticket.ticket\n link_request.replica_url = (\n '%s://%s' % (protocol, app_identity.get_default_version_hostname()))\n link_request.initiated_by = initiated_by.to_bytes()\n\n # Primary will look at X-Appengine-Inbound-Appid and compare it to what's in\n # the ticket.\n try:\n result = urlfetch.fetch(\n url='%s/auth_service/api/v1/internal/link_replica' % ticket.primary_url,\n payload=link_request.SerializeToString(),\n method='POST',\n headers=headers,\n follow_redirects=False,\n deadline=30,\n validate_certificate=True)\n except urlfetch.Error as exc:\n raise ProtocolError(\n replication_pb2.ServiceLinkResponse.TRANSPORT_ERROR,\n 'URLFetch error (%s): %s' % (exc.__class__.__name__, exc))\n\n # Protobuf based protocol is not using HTTP codes (handler always replies with\n # HTTP 200, providing error details if needed in protobuf serialized body).\n # So any other status code here means there was a transport level error.\n if result.status_code != 200:\n raise ProtocolError(\n replication_pb2.ServiceLinkResponse.TRANSPORT_ERROR,\n 'Request to the primary failed with HTTP %d.' % result.status_code)\n\n link_response = replication_pb2.ServiceLinkResponse.FromString(result.content)\n if link_response.status != replication_pb2.ServiceLinkResponse.SUCCESS:\n message = LINKING_ERRORS.get(\n link_response.status,\n 'Request to the primary failed with status %d.' % link_response.status)\n raise ProtocolError(link_response.status, message)\n\n # Become replica. Auth DB will be overwritten on a first push from Primary.\n state = model.AuthReplicationState(\n key=model.replication_state_key(),\n primary_id=ticket.primary_id,\n primary_url=ticket.primary_url)\n state.put()", "def mmo_replica_state(self, mmo_connection):\n\n # https://docs.mongodb.org/manual/reference/replica-states/\n replica_states = [\n { \"id\": 0, \"name\": \"STARTUP\", \"description\": \"Not yet an active member of any set. All members start up in this state. The mongod parses the replica set configuration document while in STARTUP.\" },\n { \"id\": 1, \"name\": \"PRIMARY\", \"description\": \"The member in state primary is the only member that can accept write operations.\" },\n { \"id\": 2, \"name\": \"SECONDARY\", \"description\": \"A member in state secondary is replicating the data store. Data is available for reads, although they may be stale.\" },\n { \"id\": 3, \"name\": \"RECOVERING\", \"description\": \"Can vote. 
Members either perform startup self-checks, or transition from completing a rollback or resync.\" },\n { \"id\": 5, \"name\": \"STARTUP2\", \"description\": \"The member has joined the set and is running an initial sync.\" },\n { \"id\": 6, \"name\": \"UNKNOWN\", \"description\": \"The member's state, as seen from another member of the set, is not yet known.\" },\n { \"id\": 7, \"name\": \"ARBITER\", \"description\": \"Arbiters do not replicate data and exist solely to participate in elections.\" },\n { \"id\": 8, \"name\": \"DOWN\", \"description\": \"The member, as seen from another member of the set, is unreachable.\" },\n { \"id\": 9, \"name\": \"ROLLBACK\", \"description\": \"This member is actively performing a rollback. Data is not available for reads.\" },\n { \"id\": 10, \"name\": \"REMOVED\", \"description\": \"This member was once in a replica set but was subsequently removed.\" }\n ]\n\n if self.mmo_is_mongod(mmo_connection):\n return replica_states[mmo_connection[\"admin\"].command(\"replSetGetStatus\")[\"myState\"]]\n else:\n raise Exception(\"Not a mongod process\")", "def test_read_cluster_resource_quota(self):\n pass", "def replicas(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"replicas\")", "def sstable_repairedset_test(self):\n cluster = self.cluster\n cluster.set_configuration_options(values={'hinted_handoff_enabled': False})\n cluster.populate(2).start()\n node1, node2 = cluster.nodelist()\n node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=2)', 'compaction(strategy=SizeTieredCompactionStrategy,enabled=false)', '-rate', 'threads=50'])\n\n node1.flush()\n node2.flush()\n\n node2.stop(gently=False)\n\n node2.run_sstablerepairedset(keyspace='keyspace1')\n node2.start(wait_for_binary_proto=True)\n\n initialOut1 = node1.run_sstablemetadata(keyspace='keyspace1').stdout\n initialOut2 = node2.run_sstablemetadata(keyspace='keyspace1').stdout\n\n matches = findall('(?<=Repaired at:).*', '\\n'.join([initialOut1, initialOut2]))\n debug(\"Repair timestamps are: {}\".format(matches))\n\n uniquematches = set(matches)\n matchcount = Counter(matches)\n\n self.assertGreaterEqual(len(uniquematches), 2, uniquematches)\n\n self.assertGreaterEqual(max(matchcount), 1, matchcount)\n\n self.assertIn('Repaired at: 0', '\\n'.join([initialOut1, initialOut2]))\n\n node1.stop()\n node2.stress(['write', 'n=15K', 'no-warmup', '-schema', 'replication(factor=2)'])\n node2.flush()\n node1.start(wait_for_binary_proto=True)\n\n if cluster.version() >= \"2.2\":\n node1.repair()\n else:\n node1.nodetool(\"repair -par -inc\")\n\n finalOut1 = node1.run_sstablemetadata(keyspace='keyspace1').stdout\n finalOut2 = node2.run_sstablemetadata(keyspace='keyspace1').stdout\n\n matches = findall('(?<=Repaired at:).*', '\\n'.join([finalOut1, finalOut2]))\n\n debug(matches)\n\n uniquematches = set(matches)\n matchcount = Counter(matches)\n\n self.assertGreaterEqual(len(uniquematches), 2)\n\n self.assertGreaterEqual(max(matchcount), 2)\n\n self.assertNotIn('Repaired at: 0', '\\n'.join([finalOut1, finalOut2]))", "def start_wsrep_new_cluster(self):\r\n return execute(self._start_wsrep_new_cluster)", "def test_snat_with_docker_restart_on_master(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"docker\",\n host_ips = 
[self.inputs.k8s_master_ip])\n time.sleep(30)\n cluster_status, error_nodes = ContrailStatusChecker(self.inputs).wait_till_contrail_cluster_stable()\n assert cluster_status, 'All nodes and services not up. Failure nodes are: %s' % (\n error_nodes)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def resource_type(self):\n return 'cluster'", "def test_update_nas_share_by_pool(self):\n pass", "def auto_config(self, num_replicas=1):\n _ = num_replicas\n return {}", "def failover_host(self, context, volumes, secondary_id=None, groups=None):\n volume_updates = []\n back_end_ip = None\n svc_host = volume_utils.extract_host(self.host, 'backend')\n service = objects.Service.get_by_args(context, svc_host,\n 'cinder-volume')\n\n if secondary_id and secondary_id != self.replica.backend_id:\n LOG.error(\"Kaminario driver received failover_host \"\n \"request, But backend is non replicated device\")\n raise exception.UnableToFailOver(reason=_(\"Failover requested \"\n \"on non replicated \"\n \"backend.\"))\n\n if (service.active_backend_id and\n service.active_backend_id != self.configuration.san_ip):\n self.snap_updates = []\n rep_volumes = []\n # update status for non-replicated primary volumes\n for v in volumes:\n vol_name = self.get_volume_name(v['id'])\n vol = self.client.search(\"volumes\", name=vol_name)\n if v.replication_status != K2_REP_FAILED_OVER and vol.total:\n status = 'available'\n if v.volume_attachment:\n map_rs = self.client.search(\"mappings\",\n volume=vol.hits[0])\n status = 'in-use'\n if map_rs.total:\n map_rs.hits[0].delete()\n volume_updates.append({'volume_id': v['id'],\n 'updates':\n {'status': status}})\n else:\n rep_volumes.append(v)\n\n # In-sync from secondaray array to primary array\n for v in rep_volumes:\n vol_name = self.get_volume_name(v['id'])\n vol = self.client.search(\"volumes\", name=vol_name)\n rvol_name = self.get_rep_name(vol_name)\n rvol = self.target.search(\"volumes\", name=rvol_name)\n session_name = self.get_session_name(v['id'])\n rsession_name = self.get_rep_name(session_name)\n ssn = self.target.search(\"replication/sessions\",\n name=rsession_name)\n if ssn.total:\n tgt_ssn = ssn.hits[0]\n ssn = self.client.search(\"replication/sessions\",\n name=session_name)\n if ssn.total:\n src_ssn = ssn.hits[0]\n\n if (tgt_ssn.state == 'failed_over' and\n tgt_ssn.current_role == 'target' and vol.total and src_ssn):\n map_rs = self.client.search(\"mappings\", volume=vol.hits[0])\n if map_rs.total:\n map_rs.hits[0].delete()\n tgt_ssn.state = 'in_sync'\n tgt_ssn.save()\n self._check_for_status(src_ssn, 'in_sync')\n if (rvol.total and src_ssn.state == 'in_sync' and\n src_ssn.current_role == 'target'):\n gen_no = self._create_volume_replica_user_snap(self.target,\n tgt_ssn)\n self.snap_updates.append({'tgt_ssn': tgt_ssn,\n 'gno': gen_no,\n 'stime': time.time()})\n LOG.debug(\"The target session: %s state is \"\n \"changed to in sync\", rsession_name)\n\n self._is_user_snap_sync_finished()\n\n # Delete secondary volume mappings and create snapshot\n for v in rep_volumes:\n vol_name = self.get_volume_name(v['id'])\n vol = self.client.search(\"volumes\", name=vol_name)\n rvol_name = self.get_rep_name(vol_name)\n rvol = self.target.search(\"volumes\", name=rvol_name)\n session_name = self.get_session_name(v['id'])\n rsession_name = self.get_rep_name(session_name)\n ssn = self.target.search(\"replication/sessions\",\n name=rsession_name)\n if ssn.total:\n tgt_ssn = ssn.hits[0]\n ssn = 
self.client.search(\"replication/sessions\",\n name=session_name)\n if ssn.total:\n src_ssn = ssn.hits[0]\n if (rvol.total and src_ssn.state == 'in_sync' and\n src_ssn.current_role == 'target'):\n map_rs = self.target.search(\"mappings\",\n volume=rvol.hits[0])\n if map_rs.total:\n map_rs.hits[0].delete()\n gen_no = self._create_volume_replica_user_snap(self.target,\n tgt_ssn)\n self.snap_updates.append({'tgt_ssn': tgt_ssn,\n 'gno': gen_no,\n 'stime': time.time()})\n self._is_user_snap_sync_finished()\n # changing source sessions to failed-over\n for v in rep_volumes:\n vol_name = self.get_volume_name(v['id'])\n vol = self.client.search(\"volumes\", name=vol_name)\n rvol_name = self.get_rep_name(vol_name)\n rvol = self.target.search(\"volumes\", name=rvol_name)\n session_name = self.get_session_name(v['id'])\n rsession_name = self.get_rep_name(session_name)\n ssn = self.target.search(\"replication/sessions\",\n name=rsession_name)\n if ssn.total:\n tgt_ssn = ssn.hits[0]\n ssn = self.client.search(\"replication/sessions\",\n name=session_name)\n if ssn.total:\n src_ssn = ssn.hits[0]\n if (rvol.total and src_ssn.state == 'in_sync' and\n src_ssn.current_role == 'target'):\n src_ssn.state = 'failed_over'\n src_ssn.save()\n self._check_for_status(tgt_ssn, 'suspended')\n LOG.debug(\"The target session: %s state is \"\n \"changed to failed over\", session_name)\n\n src_ssn.state = 'in_sync'\n src_ssn.save()\n LOG.debug(\"The target session: %s state is \"\n \"changed to in sync\", session_name)\n rep_status = fields.ReplicationStatus.DISABLED\n volume_updates.append({'volume_id': v['id'],\n 'updates':\n {'replication_status': rep_status}})\n\n back_end_ip = self.configuration.san_ip\n else:\n \"\"\"Failover to replication target.\"\"\"\n for v in volumes:\n vol_name = self.get_volume_name(v['id'])\n rv = self.get_rep_name(vol_name)\n if self.target.search(\"volumes\", name=rv).total:\n self._failover_volume(v)\n volume_updates.append(\n {'volume_id': v['id'],\n 'updates':\n {'replication_status': K2_REP_FAILED_OVER}})\n else:\n volume_updates.append({'volume_id': v['id'],\n 'updates': {'status': 'error', }})\n back_end_ip = self.replica.backend_id\n return back_end_ip, volume_updates, []", "def cluster_assign(images_lists, dataset):\n assert images_lists is not None\n pseudolabels = []\n image_indexes = []\n for cluster, images in enumerate(images_lists):\n image_indexes.extend(images)\n pseudolabels.extend([cluster] * len(images))\n print(image_indexes)\n print(pseudolabels)\n \n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n t = transforms.Compose([transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize])\n\n return ReassignedDataset(image_indexes, pseudolabels, dataset, t)", "def test_read_namespaced_applied_cluster_resource_quota(self):\n pass", "def rds_resource(session):\n #print type(session)\n rds = session.resource('rds')", "def test_snat_with_kubelet_restart_on_master(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"kubelet\",\n host_ips = [self.inputs.k8s_master_ip])\n time.sleep(30) # Wait timer for all kubernetes pods to stablise.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", 
"def _setup_cluster(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "def _check_all_replicas_connected(num_replicas, gateway_port, protocol):\n exec_ids = set()\n exec_id_list = []\n for i in range(num_replicas + 1):\n id_ = _send_request(gateway_port, protocol, request_size=2)[0].text\n exec_ids.add(id_)\n exec_id_list.append(id_)\n print(exec_id_list)\n assert len(exec_ids) == num_replicas", "def multi_dc_replace_with_rf1_test(self):\n cluster = self.cluster\n cluster.populate([1, 1])\n cluster.start()\n node1, node2 = cluster.nodelist()\n\n node1 = cluster.nodes['node1']\n yaml_config = \"\"\"\n # Create the keyspace and table\n keyspace: keyspace1\n keyspace_definition: |\n CREATE KEYSPACE keyspace1 WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 1, 'dc2': 1};\n table: users\n table_definition:\n CREATE TABLE users (\n username text,\n first_name text,\n last_name text,\n email text,\n PRIMARY KEY(username)\n ) WITH compaction = {'class':'SizeTieredCompactionStrategy'};\n insert:\n partitions: fixed(1)\n batchtype: UNLOGGED\n queries:\n read:\n cql: select * from users where username = ?\n fields: samerow\n \"\"\"\n with tempfile.NamedTemporaryFile(mode='w+') as stress_config:\n stress_config.write(yaml_config)\n stress_config.flush()\n node1.stress(['user', 'profile=' + stress_config.name, 'n=10k', 'no-warmup',\n 'ops(insert=1)', '-rate', 'threads=50'])\n\n session = self.patient_cql_connection(node1)\n\n # change system_auth keyspace to 2 (default is 1) to avoid\n # \"Unable to find sufficient sources for streaming\" warning\n if cluster.cassandra_version() >= '2.2.0':\n session.execute(\"\"\"\n ALTER KEYSPACE system_auth\n WITH replication = {'class':'SimpleStrategy', 'replication_factor':2};\n \"\"\")\n\n # Save initial data\n stress_table = 'keyspace1.users'\n query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.TWO)\n initial_data = rows_to_list(session.execute(query))\n\n # stop node to replace\n debug(\"Stopping node to replace.\")\n node2.stop(wait_other_notice=True)\n\n node3 = new_node(cluster, data_center='dc2')\n node3.start(replace_address='127.0.0.2', wait_for_binary_proto=True)\n\n assert_bootstrap_state(self, node3, 'COMPLETED')\n\n # Check that keyspace was replicated from dc1 to dc2\n self.assertFalse(node3.grep_log(\"Unable to find sufficient sources for streaming range\"))\n\n # query should work again with node1 stopped\n node1.stop(wait_other_notice=True)\n debug(\"Verifying data on new node.\")\n session = self.patient_exclusive_cql_connection(node3)\n assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table),\n expected=initial_data,\n cl=ConsistencyLevel.LOCAL_ONE)", "def test_patch_hyperflex_cluster_storage_policy(self):\n pass" ]
[ "0.61834", "0.575454", "0.5711087", "0.56454813", "0.5626809", "0.55557746", "0.5455231", "0.5451529", "0.53820515", "0.53685325", "0.5276885", "0.5269998", "0.51683676", "0.51078784", "0.51020664", "0.51005435", "0.5082117", "0.50819266", "0.5079213", "0.50754535", "0.5062905", "0.5054602", "0.50453144", "0.50147337", "0.4985906", "0.4985729", "0.49813518", "0.49746415", "0.49657443", "0.49571818", "0.49422914", "0.4933104", "0.49308908", "0.49262258", "0.4905842", "0.4902875", "0.48978463", "0.48799348", "0.48700443", "0.48613858", "0.48608682", "0.48603505", "0.48300236", "0.48286164", "0.48156935", "0.48156935", "0.48156935", "0.48156935", "0.48156935", "0.48084727", "0.48047262", "0.4801445", "0.4789759", "0.47834814", "0.47729683", "0.47712186", "0.47692114", "0.4762642", "0.47600186", "0.4758706", "0.47560227", "0.4750343", "0.47451994", "0.47448757", "0.4727785", "0.47222576", "0.47176006", "0.4711186", "0.469821", "0.469821", "0.46962255", "0.46836296", "0.4683562", "0.4682668", "0.46746698", "0.46704304", "0.46671784", "0.4666276", "0.46625626", "0.46524522", "0.46446708", "0.4643335", "0.46318763", "0.4627574", "0.4625719", "0.4624651", "0.4624099", "0.4620725", "0.46167937", "0.46139437", "0.46107686", "0.4605176", "0.45984", "0.45970395", "0.45951182", "0.45910433", "0.45854288", "0.45817202", "0.45738068", "0.45730677", "0.45730084" ]
0.0
-1
TDE allows you to perform real-time I/O encryption and decryption on data files. Data is encrypted before it is written to disk and is decrypted when it is read from disk into memory. For more information, see [Configure TDE](~~131048~~). > You cannot disable TDE after it is enabled.
def modify_dbinstance_tdewith_options(
    self,
    request: dds_20151201_models.ModifyDBInstanceTDERequest,
    runtime: util_models.RuntimeOptions,
) -> dds_20151201_models.ModifyDBInstanceTDEResponse:
    UtilClient.validate_model(request)
    query = {}
    if not UtilClient.is_unset(request.dbinstance_id):
        query['DBInstanceId'] = request.dbinstance_id
    if not UtilClient.is_unset(request.encryption_key):
        query['EncryptionKey'] = request.encryption_key
    if not UtilClient.is_unset(request.encryptor_name):
        query['EncryptorName'] = request.encryptor_name
    if not UtilClient.is_unset(request.owner_account):
        query['OwnerAccount'] = request.owner_account
    if not UtilClient.is_unset(request.owner_id):
        query['OwnerId'] = request.owner_id
    if not UtilClient.is_unset(request.resource_owner_account):
        query['ResourceOwnerAccount'] = request.resource_owner_account
    if not UtilClient.is_unset(request.resource_owner_id):
        query['ResourceOwnerId'] = request.resource_owner_id
    if not UtilClient.is_unset(request.role_arn):
        query['RoleARN'] = request.role_arn
    if not UtilClient.is_unset(request.security_token):
        query['SecurityToken'] = request.security_token
    if not UtilClient.is_unset(request.tdestatus):
        query['TDEStatus'] = request.tdestatus
    req = open_api_models.OpenApiRequest(
        query=OpenApiUtilClient.query(query)
    )
    params = open_api_models.Params(
        action='ModifyDBInstanceTDE',
        version='2015-12-01',
        protocol='HTTPS',
        pathname='/',
        method='POST',
        auth_type='AK',
        style='RPC',
        req_body_type='formData',
        body_type='json'
    )
    return TeaCore.from_map(
        dds_20151201_models.ModifyDBInstanceTDEResponse(),
        self.call_api(params, req, runtime)
    )
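A minimal usage sketch for the ModifyDBInstanceTDE operation documented above, assuming the alibabacloud_dds20151201 package layout; the client setup, endpoint, credentials, and instance ID below are placeholders/assumptions, not values taken from this dataset.

# Hypothetical usage sketch; package paths, endpoint, and IDs are assumptions.
from alibabacloud_dds20151201.client import Client as DdsClient
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util import models as util_models

# Build a client against the DDS endpoint (placeholder credentials).
config = open_api_models.Config(
    access_key_id='<your-access-key-id>',
    access_key_secret='<your-access-key-secret>',
    endpoint='mongodb.aliyuncs.com',  # assumed endpoint
)
client = DdsClient(config)

# Request to enable TDE on an instance; note TDE cannot be disabled once enabled.
request = dds_20151201_models.ModifyDBInstanceTDERequest(
    dbinstance_id='dds-bpxxxxxxxxxxxxxx',  # placeholder instance ID
    tdestatus='enabled',
)
response = client.modify_dbinstance_tdewith_options(
    request, util_models.RuntimeOptions()
)
print(response.body)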
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n # file = None\n # for arg in sys.argv:\n # if \".txt\" in arg or \".py\" not in arg or \".log\" not in arg:\n # file = arg\n\n file = input(\"Enter a file: \")\n\n file_data = Cryptography()\n file_data.file = file\n\n crypt_type = input(\"Please enter 'E' to encrypt or 'D' to decrypt\\n>> \")\n file_data.crypt_type = crypt_type\n\n crypt_type = \"encrypt\" if crypt_type == 'E' else \"decrypt\"\n\n file_data.crypt_method = file_data.crypt_method\n\n key = input(\"Please enter a key for your data\\n>> \")\n file_data.key = key\n\n print(f\"crypt_method: {file_data.crypt_method}\")\n new_data = file_data.crypt_methods[file_data.crypt_method]()\n\n crypt_methods = defaultdict(str,\n {'C': \"Caesar\",\n 'M': \"Monoalphabetic\",\n 'P': \"Polyalphabetic\"})\n\n if DEBUG is False:\n crypt_method = crypt_methods[file_data.crypt_method]\n new_file_name = f\"{crypt_method}_{crypt_type.capitalize()}ed.txt\"\n logger.info(f\"{type(new_data)}: {new_data}\")\n Cryptography.write(new_file_name, new_data)\n print(f\"Your new {crypt_type}ed file has been created as \" +\n f\"{new_file_name}.\")", "def save_data(self):\n\n #\n # t=self.t[0:-1:self.R].reshape([self.t[0:-1:self.R].shape[0],1])\n\n def deterministic_data():\n t = self.dt * self.tau\n Ueem1 = self.Xeem[:, 0]\n Ueem2 = self.Xeem[:, 1]\n Ueem3 = self.Xeem[:, 2]\n Uem1 = self.Xem[:, 0]\n Uem2 = self.Xem[:, 1]\n Uem3 = self.Xem[:, 2]\n Ustk1 = self.Xstkm[:, 0]\n Ustk2 = self.Xstkm[:, 1]\n Ustk3 = self.Xstkm[:, 2]\n tagPar = np.array([\n 'k = ',\n 'r = ',\n 'T0 = ',\n 'N = ',\n 'R = ',\n 'T = ',\n 'dt = ',\n 'Dt = ',\n 'L = ',\n 'gamma = ',\n 'eta = ',\n 'lambda =',\n 'delta = ',\n 'beta = ',\n 'a = ',\n 'N0 = ',\n 'u = ',\n 'sigma1 = ',\n 'sigma2 = ',\n 'x01 = ',\n 'x02 = ',\n 'x03 = ',\n ])\n ParValues = np.array([\n self.k,\n self.r,\n self.T0,\n self.N,\n self.R,\n self.T,\n self.dt,\n self.Dt,\n self.L,\n self.gamma,\n self.eta,\n self.Lambda,\n self.delta,\n self.beta,\n self.a,\n self.NN,\n self.u,\n self.sigma1,\n self.sigma2,\n self.Xzero[0, 0],\n self.Xzero[0, 1],\n self.Xzero[0, 2]\n ])\n strPrefix = str(self.Dt)\n name1 = 'DetParameters' + strPrefix + '.txt'\n name2 = 'DetSolution' + strPrefix + '.txt'\n name3 = 'DetRefSolution' + str(self.dt) + '.txt'\n\n PARAMETERS = np.column_stack((tagPar, ParValues))\n np.savetxt(name1, PARAMETERS, delimiter=\" \", fmt=\"%s\")\n np.savetxt(name2,\n np.transpose(\n (\n t, Uem1, Uem2, Uem3, Ustk1, Ustk2, Ustk3,\n )\n ), fmt='%1.8f', delimiter='\\t')\n np.savetxt(name3,\n np.transpose(\n (\n self.t, Ueem1, Ueem2, Ueem3,\n )\n ), fmt='%1.8f', delimiter='\\t')\n\n def stochastic_data():\n \"\"\"\n t = self.dt * self.tau\n Ueem1 = self.Xeem[:, 0]\n Ueem2 = self.Xeem[:, 1]\n Ueem3 = self.Xeem[:, 2]\n Uem1 = self.Xem[:, 0]\n Uem2 = self.Xem[:, 1]\n Uem3 = self.Xem[:, 2]\n Ustk1 = self.Xstkm[:, 0]\n Ustk2 = self.Xstkm[:, 1]\n Ustk3 = self.Xstkm[:, 2]\n Utem1 = self.Xtem[:, 0]\n Utem2 = self.Xtem[:, 1]\n Utem3 = self.Xtem[:, 2]\n \"\"\"\n\n tagPar = np.array([\n 'k = ',\n 'r = ',\n 'T0 = ',\n 'N = ',\n 'R = ',\n 'T = ',\n 'dt = ',\n 'Dt = ',\n 'L = ',\n 'gamma = ',\n 'eta = ',\n 'lambda =',\n 'delta = ',\n 'beta = ',\n 'a = ',\n 'N0 = ',\n 'u = ',\n 'sigma1 = ',\n 'sigma2 = ',\n 'x01 = ',\n 'x02 = ',\n 'x03 = ',\n ])\n ParValues = np.array([\n self.k,\n self.r,\n self.T0,\n self.N,\n self.R,\n self.T,\n self.dt,\n self.Dt,\n self.L,\n self.gamma,\n self.eta,\n self.Lambda,\n self.delta,\n self.beta,\n self.a,\n self.NN,\n self.u,\n self.sigma1,\n self.sigma2,\n self.Xzero[0, 0],\n 
self.Xzero[0, 1],\n self.Xzero[0, 2]\n ])\n strPrefix = str(self.Dt)\n name1 = 'StoParameters' + strPrefix + '.txt'\n '''\n name2 = 'StoSolution' + strPrefix + '.txt'\n name3 = 'StoRefSolution' + str(self.dt) + '.txt'\n '''\n PARAMETERS = np.column_stack((tagPar, ParValues))\n np.savetxt(name1, PARAMETERS, delimiter=\" \", fmt=\"%s\")\n '''\n np.save(name2,\n np.transpose(\n (\n t, Uem1, Uem2, Uem3, Ustk1, Ustk2, Ustk3, Utem1,\n Utem2, Utem3\n )\n ))\n np.savetxt(name3,\n np.transpose(\n (\n self.t, Ueem1, Ueem2, Ueem3\n )\n ))\n if self.sigma1 == 0.0:\n if self.sigma2 == 0.0:\n DeterministicData()\n return\n StochasticData()\n '''\n\n return", "def setup_tdd(self,earfcn,bwMhz,powerdBm,ud_config,special_sf_config=0,ul_timing_advance=0,with_rx=False):\r\r\n\r\r\n self.setup_modem()\r\r\n self.instr.setup_4g_tx_test(cable_loss_dB=self.testConfig.cable_loss)\r\r\n self.teststep_idx = 0\r\r\n band,freq_ul,freq_dl = lte_util.get_lte_ul_dl_freq_band(earfcn)\r\r\n\r\r\n self.set_band(band=band)\r\r\n self.modemObj.set_rat_band(rat='LTE', band=band)\r\r\n duplex_mode = self.get_duplex_mode()\r\r\n assert(duplex_mode == \"TDD\")\r\r\n self.instr.lte_tx.set_duplex_mode(duplex_mode=duplex_mode)\r\r\n self.instr.lte_tx.set_band(band=band)\r\r\n self.modemObj.set_freqMHz(freqMHz=freq_ul)\r\r\n self.instr.lte_tx.set_rf_freqMHz(freqMHz=freq_ul)\r\r\n self.set_bw(bwMHz=bwMhz)\r\r\n rf_config = LTE_rf_config(bwMHz=bwMhz)\r\r\n self.modemObj.set_rb(direction='ul', num_rb=rf_config.num_rbs)\r\r\n self.modemObj.set_rb(direction='dl', num_rb=rf_config.num_rbs)\r\r\n self.modemObj.set_rb_start(rb_offset=rf_config.rb_offset)\r\r\n self.modemObj.set_rb_len(rb_len=rf_config.rb_len)\r\r\n rf_config.check_config()\r\r\n self.instr.lte_tx.set_channel_bw_MHz(bwMHz=bwMhz)\r\r\n self.modemObj.send_ul_pattern()\r\r\n\r\r\n self.set_ud_config(ud_config)\r\r\n self.modemObj.set_ud_config(ud_config)\r\r\n self.instr.lte_tx.set_ul_dl_conf(ud_config)\r\r\n\r\r\n self.modemObj.enable_tx()\r\r\n\r\r\n bursted = not (ud_config==\"TEST0\" or ud_config==\"TEST1\")\r\r\n self.setup_tdd_trigger(bursted,special_sf_config)\r\r\n\r\r\n self.modemObj.set_special_sf_config(special_sf_config)\r\r\n self.instr.lte_tx.set_special_subframe_conf(special_sf_config)\r\r\n\r\r\n self.modemObj.set_ul_timing_advance(ul_timing_advance)\r\r\n\r\r\n meas_sf = 2\r\r\n self.set_meas_sf(meas_sf)\r\r\n self.instr.lte_tx.conf_measurement_subframe(measSubframe=meas_sf)\r\r\n\r\r\n self.modemObj.set_txagc_dbm(value=powerdBm)\r\r\n self.instr.lte_tx.set_rf_exp_power(power_dBm=powerdBm+5)\r\r\n self.instr.waitForCompletion()\r\r\n\r\r\n if with_rx:\r\r\n assert(bursted)\r\r\n self.modemObj.set_freqMHz(direction='rx',freqMHz=freq_dl)\r\r\n self.modemObj.set_rxagc_auto(ant='m')\r\r\n self.modemObj.enable_rx(ant='m')\r\r\n\r\r\n self.set_test_afc_val()\r\r\n\r\r\n return bursted", "def main():\n\n # performs crib dragging using initial values\n plaintext1, plaintext2 = crib_drag('', '', 0, 0)\n\n if plaintext1 is None or plaintext2 is None:\n print('No possible English decryption using the current dictionary')\n return\n\n # find the key and creates file with results\n plaintext1 = plaintext1[:CIPHER_LEN]\n plaintext2 = plaintext2[:CIPHER_LEN]\n key = find_key(plaintext1, plaintext2)\n\n with open('plaintext1.txt', 'w') as plain_file:\n plain_file.write(plaintext1)\n with open('plaintext2.txt', 'w') as plain_file:\n plain_file.write(plaintext2)\n with open('key.txt', 'wb') as plain_file:\n plain_file.write(key)", "def TestTDDFT():\n prm = '''\n Model\tTDHF\n 
Method\tMMUT\n dt\t0.02\n MaxIter\t100\n ExDir\t1.0\n EyDir\t1.0\n EzDir\t1.0\n FieldAmplitude\t0.01\n FieldFreq\t0.9202\n ApplyImpulse\t1\n ApplyCw\t\t0\n StatusEvery\t10\n '''\n geom = \"\"\"\n H 0. 0. 0.\n H 0. 0. 0.9\n H 2.0 0. 0\n H 2.0 0.9 0\n \"\"\"\n output = re.sub(\"py\",\"dat\",sys.argv[0])\n mol = gto.Mole()\n mol.atom = geom\n mol.basis = 'sto-3g'\n mol.build()\n the_scf = pyscf.dft.RKS(mol)\n the_scf.xc='HF'\n print \"Inital SCF finished. E=\", the_scf.kernel()\n aprop = tdscf.tdscf(the_scf,prm,output)\n return", "def test_single_dft():\n test_file = os.path.join(DATA_DIR, 'test39_dft.out')\n parser = CRYSTOUT(test_file)\n info = parser.info\n assert info['finished'] == 2 # finished without errors\n assert info['energy'] == -4.8538264773648E+02 * Ha # energy in eV\n assert info['k'] == '6x6x6' # Monkhorst-Pack net\n assert info['H'] == \"LDA/PZ_LDA\"\n assert info['ncycles'][0] == 9\n assert info['electrons']['basis_set']['ecp']['Ge'][0][1] == (0.82751, -1.26859, -1)\n assert info['electrons']['basis_set']['bs']['Ge'][0][1] == (1.834, 0.4939, 0.006414)", "def enable_dual_TTS(self):\n\t\t\n\t\tself.isActiveDualTTS = True\n\t\tself.change_TTS_engine()", "def _disable_encryption(self):\n # () -> None\n self.encrypt = self._disabled_encrypt\n self.decrypt = self._disabled_decrypt", "def tdd():\n\n with lcd(FRONTENDDIR):\n cmd = '%(gulp)s tdd' % {'gulp': get_gulp()}\n local(cmd)", "def crypto_test(tdesc, tpm):\n node_name = tdesc.get('name')\n key = get_attribute(tdesc, 'key')\n if len(key) not in (16, 24, 32):\n raise subcmd.TpmTestError('wrong key size \"%s:%s\"' % (\n node_name,\n ''.join('%2.2x' % ord(x) for x in key)))\n iv = get_attribute(tdesc, 'iv', required=False)\n if iv and len(iv) != 16:\n raise subcmd.TpmTestError('wrong iv size \"%s:%s\"' % (\n node_name,\n ''.join('%2.2x' % ord(x) for x in iv)))\n clear_text = get_attribute(tdesc, 'clear_text')\n if tpm.debug_enabled():\n print('clear text size', len(clear_text))\n cipher_text = get_attribute(tdesc, 'cipher_text', required=False)\n real_cipher_text = crypto_run(node_name, ENCRYPT, key, iv,\n clear_text, cipher_text, tpm)\n crypto_run(node_name, DECRYPT, key, iv, real_cipher_text,\n clear_text, tpm)\n print(utils.cursor_back() + 'SUCCESS: %s' % node_name)", "async def modify_dbinstance_tdewith_options_async(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.encryption_key):\n query['EncryptionKey'] = request.encryption_key\n if not UtilClient.is_unset(request.encryptor_name):\n query['EncryptorName'] = request.encryptor_name\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_arn):\n query['RoleARN'] = request.role_arn\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.tdestatus):\n query['TDEStatus'] = request.tdestatus\n req 
= open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ModifyDBInstanceTDE',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ModifyDBInstanceTDEResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def disable_dual_TTS(self):\n\t\t\n\t\tself.isActiveDualTTS = False\n\t\tself.change_TTS_engine()", "def main(ctx, access_key, host, debug):\n info = {\n \"access_key\": access_key,\n \"host\": host,\n \"DEBUG\": debug\n }\n\n _tda = None\n\n if access_key == \"\":\n configFile = _config_filepath()\n if os.path.exists(configFile):\n with open(configFile, \"r\", encoding=\"utf-8\") as cf:\n if cf.read() != \"\":\n info = _getConf()\n info[\"DEBUG\"] = debug\n\n if info[\"access_key\"] != \"\":\n _tda = TDA(info[\"access_key\"], info[\"host\"])\n if info[\"DEBUG\"]:\n _tda.Debug()\n\n ctx.obj = _tda", "def _get_sql_db_tde_disabled_event(com, ext):\n friendly_cloud_type = util.friendly_string(com.get('cloud_type'))\n reference = com.get('reference')\n description = (\n '{} SQL DB {} has TDE disabled.'\n .format(friendly_cloud_type, reference)\n )\n recommendation = (\n 'Check {} SQL DB {} and enable TDE.'\n .format(friendly_cloud_type, reference)\n )\n event_record = {\n # Preserve the extended properties from the virtual\n # machine record because they provide useful context to\n # locate the virtual machine that led to the event.\n 'ext': util.merge_dicts(ext, {\n 'record_type': 'sql_db_tde_event'\n }),\n 'com': {\n 'cloud_type': com.get('cloud_type'),\n 'record_type': 'sql_db_tde_event',\n 'reference': reference,\n 'description': description,\n 'recommendation': recommendation,\n }\n }\n\n _log.info('Generating sql_db_tde_event; %r', event_record)\n yield event_record", "def encrypt():\n print(\"Use sops to encrypt the file.\")\n print(\"Learn more at https://github.com/mozilla/sops\")", "def test_tte4(self):\n filename = str(self.temp_j2k_filename)\n xtx4_setup(filename)\n self.assertTrue(True)", "def test_endecrypt():\n\n e, d, c = keygen()\n\n test_encryption(e, c)\n test_decryption(d, c)\n key_cracker(e, c)", "def test_pert_file(self):\n path, case = os.path.split(self.ieee14)\n\n # --- with pert file ---\n ss = andes.run('ieee14.raw', pert='pert.py',\n input_path=path, no_output=True, default_config=True,\n )\n ss.TDS.init()\n self.assertIsNotNone(ss.TDS.callpert)\n\n # --- without pert file ---\n ss = andes.run('ieee14.raw',\n input_path=path, no_output=True, default_config=True,\n )\n ss.TDS.init()\n self.assertIsNone(ss.TDS.callpert)", "def decrypt_text_file(self):\r\n\t\t#Ensures that the file has something that can be decrypted.\r\n\t\tfile_contains_message = True\r\n\t\twhile file_contains_message:\r\n\t\t\tfile_exists = True\r\n\t\t\t#Checks to see if the file exists.\r\n\t\t\twhile file_exists:\r\n\t\t\t\tself.text_file_name = input(\"Please enter the name of the text file you wish to decrypt in format |file_name.txt|.--> \")\r\n\t\t\t\tif \".txt\" in self.text_file_name:\r\n\t\t\t\t\tfile_exists = Doc_Control().check_for_file(self.text_file_name)\r\n\t\t\t\telse: \r\n\t\t\t\t\tcontinue\r\n\t\t\t#Decrypts message but verifys correct key before giving user their decrypted message.\r\n\t\t\twhile True: \r\n\t\t\t\tself.message = Doc_Control().open_file(self.text_file_name)\r\n\t\t\t\tif self.message != \"\" and len(self.message) > 
4:\r\n\t\t\t\t\tfile_contains_message = False\r\n\t\t\t\t\tbreak\r\n\t\t\t\telse:\r\n\t\t\t\t\tprint(\"Your file does not contain an encryptable message.\")\r\n\t\t\t\t\tbreak\r\n\t\tself.right_key = True\r\n\t\twhile self.right_key:\r\n\t\t\tself.setup_key_decrypt()\r\n\t\t\tself.my_code = Decryptor(self.message, self.key).transfer_decrypt()\r\n\t\t\tself.verify_decrypt_key()\r\n\t\tself.output_file = Doc_Control().assign_output_file()\r\n\t\toutput_file_obj = open(self.output_file, 'w')\r\n\t\toutput_file_obj.write(self.my_code)\r\n\t\toutput_file_obj.close()\t\t\r\n\t\tprint(\"\\nYour file has been decrypted.\")", "def decrypt(self,timerPrinting=False):\n\n\t\tt = time.time() \n\t\tif self.extension in self.path:\n\t\t\twith open(self.path,'rb') as infile:\n\t\t\t\tfile_data = infile.read()\n\t\t\t# #Start Checking the Platform\n\t\t\t# if platform.system() == 'Windows':\n\t\t\t# \tself.path = self.path.split('\\\\')[-1]\n\t\t\t# elif platform.system() == 'Linux':\n\t\t\t# \tself.path = self.path.split('/')[-1]\n\t\t\t# # END Checking\n\t\t\t# print('Decryption of '+ self.path +\"...\")\n\t\t\t######################### Blowfish Decryption Algorithm ###############\n\t\t\tbs = Blowfish.block_size\n\t\t\trealData = base64.b64decode(file_data)[8:]\n\t\t\tiv = base64.b64decode(file_data)[:8]\n\t\t\tdecrypt = Blowfish.new(self.key, Blowfish.MODE_CBC, iv)\n\t\t\tself.decrypt = decrypt.decrypt(realData)\n\t\t\t########################### End Blowfish #########################\n\t\t\t#print('Writing in your file...')\n\t\t\tself.out = self.path.replace(self.extension,'')\n\t\t\tos.remove(self.path)\n\t\t\twith open(self.out,'wb') as outfile:\n\t\t\t\toutfile.write(self.decrypt)\n\t\t\tif timerPrinting:\n\t\t\t\tprint(\"Done in \",time.time() - t)\n\t\t\t\n\t\telse:\n\t\t\tprint('The File is Not Encrypted To Decrypted.')", "def get_TTS_data(self, exchanged_token, exchange=False):\n if os.path.exists(self.lock_file):\n ctime = os.stat(self.lock_file).st_ctime\n age = time.time() - ctime\n if age < self.age:\n self.log.error(\"Update already in progres. 
Sleeping ..\")\n time.sleep(self.age - age)\n else:\n self.log.error(\"Stale lock file, removing ...\")\n os.remove(self.lock_file)\n open(self.lock_file, 'w+').close()\n\n if exchange:\n with file('/tmp/refresh_token') as f:\n refresh_token = f.read()\n self.exchanged_token = self.refresh_token(self.client_id, self.client_secret, refresh_token.strip())\n if isinstance(self.exchanged_token, int):\n self.log.error(\"refresh_token error\")\n\n if self.get_certificate(self.credential_endpoint):\n # load json and prepare objects\n with open('/tmp/output.json') as tts_data_file:\n tts_data = json.load(tts_data_file)\n \n f = open(self.user_cert, 'w+')\n f.write(str(tts_data['credential']['entries'][0]['value']))\n f.close()\n \n f = open(self.user_key, 'w+')\n f.write(str(tts_data['credential']['entries'][1]['value']))\n f.close()\n \n f = open(self.user_passwd, 'w+')\n f.write(str(tts_data['credential']['entries'][2]['value']))\n f.close()\n \n try:\n os.chmod(self.user_key, 0600)\n except OSError, e:\n self.log.error(e)\n self.log.error(\"Permission denied to chmod passwd file\")\n return False\n \n os.remove(self.lock_file)\n \n return True\n else:\n return False", "def read_taiwan_ntu_dsi():\n dipy_home = pjoin(os.path.expanduser('~'), '.dipy')\n folder = pjoin(dipy_home, 'taiwan_ntu_dsi')\n fraw = pjoin(folder, 'DSI203.nii.gz')\n fbval = pjoin(folder, 'DSI203.bval')\n fbvec = pjoin(folder, 'DSI203.bvec')\n md5_dict = {'data': '950408c0980a7154cb188666a885a91f',\n 'bval': '602e5cb5fad2e7163e8025011d8a6755',\n 'bvec': 'a95eb1be44748c20214dc7aa654f9e6b',\n 'license': '7fa1d5e272533e832cc7453eeba23f44'}\n\n check_md5(fraw, md5_dict['data'])\n check_md5(fbval, md5_dict['bval'])\n check_md5(fbvec, md5_dict['bvec'])\n check_md5(pjoin(folder, 'DSI203_license.txt'), md5_dict['license'])\n\n bvals, bvecs = read_bvals_bvecs(fbval, fbvec)\n bvecs[1:] = bvecs[1:] / np.sqrt(np.sum(bvecs[1:] * bvecs[1:], axis=1))[:, None]\n\n gtab = gradient_table(bvals, bvecs)\n img = nib.load(fraw)\n return img, gtab", "def decrypt(data):\n # Decrypt data if necessary\n result = None\n if str(data[:5]) == \"<?xml\":\n print(\" - Unprotected CETRAINER detected\")\n result = data\n else:\n print(\" - Protected CETRAINER detected. 
Decrypting...\")\n ckey = 0xCE\n for i in range(2, len(data)):\n data[i] = data[i] ^ data[i-2]\n for i in range(len(data)-2, -1, -1):\n data[i] = data[i] ^ data[i+1]\n for i in range(0, len(data)):\n data[i] = data[i] ^ ckey\n ckey = (ckey + 1) & 0xFF\n\n # Decompress if necessary and write data\n if data[:5] == b'CHEAT':\n result = zlib.decompress(data[5:], -15)\n result = result[4:]\n print(\" - Decompressed CETRAINER using new method\")\n else:\n result = zlib.decompress(data, -15)\n print(\" - Decompressed CETRAINER using old method\")\n return result", "def decrypt(path, key, default, output, url, token, vaultpath):\n if not key:\n key = getpass('Encryption key: ')\n\n path, file_type, file_mtime = get_file_type_and_mtime(path)\n data = get_config(path, file_type, default=False)\n data = decrypt_credentials(data, key)\n\n # Only merge the DEFAULT section after decrypting.\n if default:\n data = merge_default(data)\n\n if url:\n try:\n import hvac\n except:\n print('''\nTo use Hashicorp's Vault you must install the hvac package.\nTo install it try using the following command:\n\n pip install hvac\n''')\n exit(3)\n\n if not token:\n token = os.environ.get('VAULT_TOKEN', '')\n if not token:\n token = getpass('Vault token: ')\n \n client = hvac.Client(url=url, token=token)\n if not vaultpath:\n vaultpath = path\n\n if vaultpath[0] == '~':\n vaultpath = vaultpath[1:]\n if vaultpath[0] == '.':\n vaultpath = vaultpath[1:]\n if vaultpath[0] == '.':\n vaultpath = vaultpath[1:]\n if vaultpath[0] == '/':\n vaultpath = vaultpath[1:]\n\n data = merge_default(data)\n for heading in data:\n # kargs = { heading: json.dumps(data[heading]) }\n client.write(vaultpath + '/' + heading, **data[heading])\n\n else:\n\n if output:\n if output[0] == '.':\n output = output[1:]\n file_type = '.' 
+ output.lower()\n\n with open(path + file_type, 'w') as save_file:\n if file_type == '.json':\n json.dump(data, save_file, indent=2)\n\n elif file_type in {'.ini', '.conf'}:\n if default:\n default_section = 'DEFAULT'\n else:\n default_section = 'DEFAULT' + os.urandom(16).hex()\n config_ini = configparser.ConfigParser(\n dict_type=OrderedDict,\n default_section=default_section,\n interpolation=None)\n for heading in data:\n config_ini.add_section(heading)\n for item in data[heading]:\n config_ini.set(heading, item, data[heading][item])\n config_ini.write(save_file)\n\n else:\n write_yaml(save_file, data)", "def showTF(tf,outDir):\n\n nlo2lo,data2lo,data2nlo,data2lo_A,data2nlo_A=tf\n\n c=ROOT.TCanvas('c','c',500,500)\n c.SetBottomMargin(0)\n c.SetTopMargin(0)\n c.SetLeftMargin(0)\n c.SetRightMargin(0)\n c.cd()\n\n p1=ROOT.TPad('p1','p1',0,0.5,1,1.0)\n p1.Draw()\n p1.SetRightMargin(0.03)\n p1.SetLeftMargin(0.12)\n p1.SetTopMargin(0.1)\n p1.SetBottomMargin(0.01)\n p1.SetGridy()\n p1.cd()\n nlo2lo.Draw('e2')\n nlo2lo.GetYaxis().SetTitle('Z ratio')\n nlo2lo.GetYaxis().SetNdivisions(5)\n nlo2lo.GetXaxis().SetTitleSize(0)\n nlo2lo.GetXaxis().SetLabelSize(0)\n nlo2lo.GetYaxis().SetTitleSize(0.08)\n nlo2lo.GetYaxis().SetTitleOffset(0.8)\n nlo2lo.GetYaxis().SetLabelSize(0.08)\n nlo2lo.GetYaxis().SetRangeUser(0.01,1.94)\n data2lo.Draw('e1same')\n data2nlo.Draw('e1same')\n\n leg1=p1.BuildLegend(0.7,0.88,0.95,0.66)\n leg1.SetFillStyle(0)\n leg1.SetBorderSize(0)\n leg1.SetTextFont(42)\n leg1.SetTextSize(0.06)\n\n l1=ROOT.TLine()\n l1.SetLineWidth(2)\n l1.SetLineColor(ROOT.kBlue)\n l1.DrawLine(data2lo.GetXaxis().GetXmin(),1,data2lo.GetXaxis().GetXmax(),1)\n\n txt=ROOT.TLatex()\n txt.SetNDC(True)\n txt.SetTextFont(42)\n txt.SetTextSize(0.08)\n txt.SetTextAlign(12)\n txt.DrawLatex(0.12,0.95,'#bf{CMS} #it{preliminary}')\n p1.RedrawAxis()\n\n c.cd()\n p2=ROOT.TPad('p2','p2',0,0,1,0.5)\n p2.SetRightMargin(0.03)\n p2.SetLeftMargin(0.12)\n p2.SetTopMargin(0.01)\n p2.SetBottomMargin(0.18)\n p2.SetGridy()\n p2.Draw()\n p2.cd()\n data2lo_A.Draw('e1')\n data2lo_A.GetYaxis().SetTitle('#gamma ratio')\n data2lo_A.GetYaxis().SetNdivisions(5)\n data2lo_A.GetYaxis().SetRangeUser(0.01,1.94)\n data2lo_A.GetXaxis().SetTitleSize(0.08)\n data2lo_A.GetXaxis().SetLabelSize(0.08)\n data2lo_A.GetYaxis().SetTitleSize(0.08)\n data2lo_A.GetYaxis().SetLabelSize(0.08)\n data2lo_A.GetYaxis().SetTitleOffset(0.8)\n data2nlo_A.Draw('e1same')\n \n leg2=p2.BuildLegend(0.7,0.94,0.95,0.80)\n leg2.SetFillStyle(0)\n leg2.SetBorderSize(0)\n leg2.SetTextFont(42)\n leg2.SetTextSize(0.06)\n \n l2=ROOT.TLine()\n l2.SetLineColor(ROOT.kBlue)\n l2.SetLineWidth(2)\n l2.DrawLine(data2lo_A.GetXaxis().GetXmin(),1,data2lo_A.GetXaxis().GetXmax(),1)\n\n p2.RedrawAxis()\n\n c.cd()\n c.Modified()\n c.Update()\n for ext in ['png','pdf']:\n c.SaveAs('{0}.{1}'.format(outDir,ext))", "def enable_tee(self):\n self._tee = True", "def test_simulation_persistence(compression, tmp_path):\n path = tmp_path / \"test_simulation_persistence.hdf5\"\n storage = FileStorage(path, compression=compression)\n\n # write some simulation data\n pde = DiffusionPDE()\n grid = UnitGrid([16, 16]) # generate grid\n state = ScalarField.random_uniform(grid, 0.2, 0.3)\n pde.solve(state, t_range=0.11, dt=0.001, tracker=storage.tracker(interval=0.05))\n storage.close()\n\n # read the data\n storage = FileStorage(path)\n np.testing.assert_almost_equal(storage.times, [0, 0.05, 0.1])\n data = np.array(storage.data)\n assert data.shape == (3,) + state.data.shape\n grid_res = storage.grid\n assert 
grid == grid_res\n grid_res = storage.grid\n assert grid == grid_res", "def louder():\n try:\n ttsEng.louder()\n except Exception, e:\n logging.error(e)", "async def modify_dbinstance_tde_async(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n runtime = util_models.RuntimeOptions()\n return await self.modify_dbinstance_tdewith_options_async(request, runtime)", "def write_tde(table_df, tde_fullpath, arg_append):\n if arg_append and not os.path.isfile(tde_fullpath):\n print \"Couldn't append -- file doesn't exist\"\n arg_append = False\n\n # Remove it if already exists\n if not arg_append and os.path.exists(tde_fullpath):\n os.remove(tde_fullpath)\n tdefile = tde.Extract(tde_fullpath)\n\n # define the table definition\n table_def = tde.TableDefinition()\n \n # create a list of column names\n colnames = table_df.columns\n # create a list of column types\n coltypes = table_df.dtypes\n\n # for each column, add the appropriate info the Table Definition\n for col_idx in range(0, len(colnames)):\n cname = colnames[col_idx]\n ctype = fieldMap[str(coltypes[col_idx])]\n table_def.addColumn(cname, ctype) \n\n # create the extract from the Table Definition\n if arg_append:\n tde_table = tdefile.openTable('Extract')\n else:\n tde_table = tdefile.addTable('Extract', table_def)\n row = tde.Row(table_def)\n\n for r in range(0, table_df.shape[0]):\n for c in range(0, len(coltypes)):\n if str(coltypes[c]) == 'float64':\n row.setDouble(c, table_df.iloc[r,c])\n elif str(coltypes[c]) == 'float32':\n row.setDouble(c, table_df.iloc[r,c])\n elif str(coltypes[c]) == 'int64':\n row.setDouble(c, table_df.iloc[r,c]) \n elif str(coltypes[c]) == 'int32':\n row.setDouble(c, table_df.iloc[r,c])\n elif str(coltypes[c]) == 'object':\n row.setString(c, table_df.iloc[r,c]) \n elif str(coltypes[c]) == 'bool':\n row.setBoolean(c, table_df.iloc[r,c])\n else:\n row.setNull(c)\n # insert the row\n tde_table.insert(row)\n\n tdefile.close()\n print \"Wrote %d lines to %s\" % (len(table_df), tde_fullpath)", "def decrypt(self, in_, out):\n try:\n # Bytes read from in will be decrypted\n \n out.write(pyDes.des.decrypt(in_.read()))\n # Read in the decrypted bytes and write the cleartext to out\n out.close()\n except Exception as e:\n print e\n pass", "def tesselate(options):\n if not options.freplace:\n if len(options.args) != 2:\n raise TelemacException(\\\n '\\nThe code \"tessellate\" here '\n 'requires one i2s/i3s file and '\n 'one output slf file\\n')\n i3s_file = options.args[0]\n out_file = options.args[1]\n else:\n if len(options.args) != 1:\n raise TelemacException(\\\n '\\nThe code \"tessellate\" here '\n 'requires one i2s/i3s file\\n')\n i3s_file = options.args[0]\n head, _ = path.splitext(i3s_file)\n out_file = head+'.slf'\n\n i3s_file = path.realpath(i3s_file)\n if not path.exists(i3s_file):\n raise TelemacException(\\\n '\\nCould not find '\n 'the file named: {}'.format(i3s_file))\n\n print('\\n\\nTessellating ' + path.basename(i3s_file) + ' within ' + \\\n path.dirname(i3s_file) + '\\n'+'~'*72+'\\n')\n i2s = InS(i3s_file)\n ikle2, ipob2, meshx, meshy = tessellate_poly(i2s, debug=True)\n\n print('\\n\\nWriting down the Selafin file ' + \\\n path.basename(out_file) + '\\n'+'~'*72+'\\n')\n slf = Selafin('')\n slf.title = ''\n slf.nplan = 1\n slf.ndp2 = 3\n slf.ndp3 = 3\n slf.nbv1 = 1\n slf.nvar = 1\n slf.varindex = 1\n slf.varnames = ['BOTTOM ']\n slf.varunits = ['M ']\n slf.ikle2 = ikle2\n slf.ikle3 = slf.ikle2\n slf.meshx = meshx\n slf.meshy = 
meshy\n slf.npoin2 = i2s.npoin\n slf.npoin3 = slf.npoin2\n slf.nelem2 = len(slf.ikle2)/slf.ndp3\n slf.nelem3 = slf.nelem2\n slf.iparam = [0, 0, 0, 0, 0, 0, 1, 0, 0, 0]\n slf.ipob2 = ipob2\n slf.ipob3 = slf.ipob2\n slf.fole = {'hook':open(out_file, 'wb'), 'endian':\">\",\n 'float':('f', 4), 'name':out_file}\n slf.tags['times'] = [1]\n if options.sph2ll != None:\n radius = 6371000.\n long0, lat0 = options.sph2ll.split(\":\")\n long0 = np.deg2rad(float(long0))\n lat0 = np.deg2rad(float(lat0))\n const = np.tan(lat0/2. + np.pi/4.)\n slf.meshx = np.rad2deg(slf.meshx/radius + long0)\n slf.meshy = np.rad2deg(2.*np.arctan(const*np.exp(slf.meshy/radius)) \\\n - np.pi/2.)\n if options.ll2sph != None:\n radius = 6371000.\n long0, lat0 = options.ll2sph.split(\":\")\n long0 = np.deg2rad(float(long0))\n lat0 = np.deg2rad(float(lat0))\n slf.meshx = radius * (np.deg2rad(slf.meshx) - long0)\n slf.meshy = radius * \\\n (np.log(np.tan(np.deg2rad(slf.meshy)/2. + np.pi/4.)) \\\n - np.log(np.tan(lat0/2. + np.pi/4.)))\n if options.ll2utm != None:\n zone = int(options.ll2utm)\n slf.meshx, slf.meshy, zone = utm.from_lat_long(slf.meshx, slf.meshy,\n zone)\n if options.utm2ll != None:\n zone = int(options.utm2ll)\n slf.meshx, slf.meshy = utm.to_lat_long(slf.meshx, slf.meshy, zone)\n slf.append_header_slf()\n slf.append_core_time_slf(0)\n slf.append_core_vars_slf([np.zeros(slf.npoin2)])\n slf.fole['hook'].close()", "def decrypt(self, data):", "def createTripleDES(key, IV, implList=None):\r\n if implList == None:\r\n implList = [\"openssl\", \"pycrypto\"]\r\n\r\n for impl in implList:\r\n if impl == \"openssl\" and cryptomath.m2cryptoLoaded:\r\n return openssl_tripledes.new(key, 2, IV)\r\n elif impl == \"pycrypto\" and cryptomath.pycryptoLoaded:\r\n return pycrypto_tripledes.new(key, 2, IV)\r\n raise NotImplementedError()", "def _disabled_decrypt(self, *args, **kwargs):\n raise NotImplementedError('\"decrypt\" is not supported by the \"{}\" algorithm'.format(self.java_name))", "def crypto_run(node_name, op_type, key, iv, in_text, out_text, tpm):\n mode_name, submode_name = node_name.split(':')\n submode_name = submode_name[:3].upper()\n\n mode = SUPPORTED_MODES.get(mode_name.upper())\n if not mode:\n raise subcmd.TpmTestError('unrecognizable mode in node \"%s\"' % node_name)\n\n submode = mode.submodes.get(submode_name, 0)\n cmd = '%c' % op_type # Encrypt or decrypt\n cmd += '%c' % submode # A particular type of a generic algorithm.\n cmd += '%c' % len(key)\n cmd += key\n cmd += '%c' % len(iv)\n if iv:\n cmd += iv\n cmd += struct.pack('>H', len(in_text))\n cmd += in_text\n if tpm.debug_enabled():\n print('%d:%d cmd size' % (op_type, mode.subcmd),\n len(cmd), utils.hex_dump(cmd))\n wrapped_response = tpm.command(tpm.wrap_ext_command(mode.subcmd, cmd))\n real_out_text = tpm.unwrap_ext_response(mode.subcmd, wrapped_response)\n if out_text:\n if len(real_out_text) > len(out_text):\n real_out_text = real_out_text[:len(out_text)] # Ignore padding\n if real_out_text != out_text:\n if tpm.debug_enabled():\n print('Out text mismatch in node %s:\\n' % node_name)\n else:\n raise subcmd.TpmTestError(\n 'Out text mismatch in node %s, operation %d:\\n'\n 'In text:%sExpected out text:%sReal out text:%s' % (\n node_name, op_type,\n utils.hex_dump(in_text),\n utils.hex_dump(out_text),\n utils.hex_dump(real_out_text)))\n return real_out_text", "def test(DATASET=\"Texas\", CONFIG=None):\n if CONFIG is None:\n CONFIG = get_config_kACE(DATASET)\n print(f\"Loading {DATASET} data\")\n x_im, y_im, EVALUATE, (C_X, C_Y) = 
datasets.fetch(DATASET, **CONFIG)\n if tf.config.list_physical_devices(\"GPU\") and not CONFIG[\"debug\"]:\n C_CODE = 3\n print(\"here\")\n TRANSLATION_SPEC = {\n \"enc_X\": {\"input_chs\": C_X, \"filter_spec\": [50, 50, C_CODE]},\n \"enc_Y\": {\"input_chs\": C_Y, \"filter_spec\": [50, 50, C_CODE]},\n \"dec_X\": {\"input_chs\": C_CODE, \"filter_spec\": [50, 50, C_X]},\n \"dec_Y\": {\"input_chs\": C_CODE, \"filter_spec\": [50, 50, C_Y]},\n }\n else:\n print(\"why here?\")\n C_CODE = 1\n TRANSLATION_SPEC = {\n \"enc_X\": {\"input_chs\": C_X, \"filter_spec\": [C_CODE]},\n \"enc_Y\": {\"input_chs\": C_Y, \"filter_spec\": [C_CODE]},\n \"dec_X\": {\"input_chs\": C_CODE, \"filter_spec\": [C_X]},\n \"dec_Y\": {\"input_chs\": C_CODE, \"filter_spec\": [C_Y]},\n }\n print(\"Change Detector Init\")\n cd = Kern_AceNet(TRANSLATION_SPEC, **CONFIG)\n print(\"Training\")\n training_time = 0\n cross_loss_weight = tf.expand_dims(tf.zeros(x_im.shape[:-1], dtype=tf.float32), -1)\n for epochs in CONFIG[\"list_epochs\"]:\n CONFIG.update(epochs=epochs)\n tr_gen, dtypes, shapes = datasets._training_data_generator(\n x_im[0], y_im[0], cross_loss_weight[0], CONFIG[\"patch_size\"]\n )\n TRAIN = tf.data.Dataset.from_generator(tr_gen, dtypes, shapes)\n TRAIN = TRAIN.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\n tr_time, _ = cd.train(TRAIN, evaluation_dataset=EVALUATE, **CONFIG)\n for x, y, _ in EVALUATE.batch(1):\n alpha = cd([x, y])\n cross_loss_weight = 1.0 - alpha\n training_time += tr_time\n\n cd.load_all_weights(cd.log_path)\n cd.final_evaluate(EVALUATE, **CONFIG)\n metrics = {}\n for key in list(cd.difference_img_metrics.keys()) + list(\n cd.change_map_metrics.keys()\n ):\n metrics[key] = cd.metrics_history[key][-1]\n metrics[\"F1\"] = metrics[\"TP\"] / (\n metrics[\"TP\"] + 0.5 * (metrics[\"FP\"] + metrics[\"FN\"])\n )\n timestamp = cd.timestamp\n epoch = cd.epoch.numpy()\n speed = (epoch, training_time, timestamp)\n del cd\n gc.collect()\n return metrics, speed", "def modify_dbinstance_tde(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n runtime = util_models.RuntimeOptions()\n return self.modify_dbinstance_tdewith_options(request, runtime)", "def enableDestruction(self):\n self.destructable = True", "def set_dft(self, value):\n self.dft = value", "def SPIjedec(self):\n data=[0x9f, 0, 0, 0];\n data=self.SPItrans(data);\n jedec=0;\n self.JEDECmanufacturer=ord(data[1]);\n if self.JEDECmanufacturer==0xFF:\n self.JEDECtype=0x20;\n self.JEDECcapacity=0x14;\n jedec=0x202014;\n else:\n self.JEDECtype=ord(data[2]);\n self.JEDECcapacity=ord(data[3]);\n jedec=(ord(data[1])<<16)+(ord(data[2])<<8)+ord(data[3]);\n self.JEDECsize=self.JEDECsizes.get(self.JEDECcapacity);\n if self.JEDECsize==None:\n self.JEDECsize=0;\n \n if jedec==0x1F4501:\n self.JEDECsize=1024**2;\n self.JEDECdevice=jedec;\n return data;", "def setup_UT_te(self):\n self.setup_O()\n self.setup_T()\n # diagonalizing T\n ET, LT, RT = eig(self.T, b=self.O, left=True, right=True)\n LT = LT.transpose().conjugate()\n exp_T = np.exp(-1j*ET / self.hbar)\n # order according to absolute value:\n i_sort = np.argsort(-abs(exp_T))\n exp_T = exp_T[i_sort]\n RT = RT[:,i_sort]\n LT = LT[i_sort,:]\n # normalize RL to O and test the decomposition\n RT, LT = self.normalize_RL_to_O(RT, LT)\n # test the quality of the decomposition -------------------------\n # we exclude directions of evals below 10**(-15) by hand\n max_mode = len(np.where(abs(exp_T)>10**(-15))[0])\n ET_red = ET[:max_mode]\n 
RT_red = RT[:,:max_mode]\n LT_red = LT[:max_mode,:]\n # 1) test of orthogonality on the reduced space\n unity = np.dot(LT_red, np.dot(self.O, RT_red))\n ortho_error = abs(unity - np.diag(np.ones(max_mode))).max()\n print(\"Orthogonality errors\", ortho_error)\n # 1) test difference between the full and the reduced te-operator\n UT_red = np.dot(RT_red, np.dot(np.diag(exp_T[:max_mode]),\n np.dot(LT_red, self.O)))\n UT = np.dot(RT, np.dot(np.diag(exp_T), np.dot(LT, self.O)))\n print(\"Propagator error\", abs(UT_red - UT).max())\n self.UT = UT", "def testRead(self):\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_TSK, location='/passwords.txt',\n inode=self._IDENTIFIER_PASSWORDS_TXT, parent=self._bde_path_spec)\n file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)\n\n self._TestRead(file_object)", "def testRead(self):\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_TSK, location='/passwords.txt',\n inode=self._IDENTIFIER_PASSWORDS_TXT, parent=self._bde_path_spec)\n file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)\n\n self._TestRead(file_object)", "def _phi_ode(self, t, z):\n # prep states and phi_matrix\n state = z[0:len(self.istate)]\n ad_state = make_ad(state)\n phi_flat = clear_ad(z[len(self.istate):])\n phi = np.reshape(phi_flat, (len(self.istate),\n len(self.istate)))\n\n # find the accelerations and jacobian\n state_deriv = self.force_model.ode(0, ad_state)\n a_matrix = jacobian(self.force_model.ode(0, ad_state),\n ad_state)\n\n # compute the derivative of the STM and repackage\n phid = np.matmul(a_matrix, phi)\n phid_flat = phid.flatten()\n z_out = np.concatenate((state_deriv, phid_flat))\n\n return z_out", "def test_tte2(self):\n filename = str(self.temp_j2k_filename)\n xtx2_setup(filename)\n self.assertTrue(True)", "def test_encrypt_decrypt(self):\n with open(self.file_path, \"rt\") as file:\n start_file = file.read()\n nonce1 = globals.generate_random_nonce()\n nonce2 = globals.generate_random_nonce()\n encrypted_file_path, additional_data = self.file_crypt.encrypt_file(\n self.file_path,\n nonce1,\n nonce2)\n file_decrypted = self.file_crypt.decrypt_file(\n file_path=encrypted_file_path,\n additional_data=additional_data)\n with open(file_decrypted, \"rt\") as file:\n end_file = file.read()\n self.assertEqual(start_file, end_file, \"Files differ!\")", "def create_tvel_file(\n depth: np.array,\n vp: np.array,\n vs: np.array,\n dens: np.array,\n save_folder: str,\n name: str = \"Test\",\n):\n\n assert (\n len(depth) == len(vp) and len(depth) == len(vs) and len(depth) == len(dens)\n ), \"All arrays (depth, vp, vs and dens) should be of same length\"\n\n \"\"\" combining all the data vector \"\"\"\n data = np.vstack((np.vstack((np.vstack((depth, vp)), vs)), dens)).T\n\n with open(join(save_folder, f\"{name}.tvel\"), \"w\") as f:\n f.write(\"# Input file for TauP\\n\")\n f.write(\"NAME TAYAK_BKE\\n\")\n for line in data:\n f.write(f\"{line[0]:8.2f}{line[1]:8.3f}{line[2]:8.3f}{line[3]:8.3f}\\n\")\n f.write(\n \"\"\" 1596.98 4.986 0.000 5.855\n 1853.05 5.150 0.000 6.025\n 2109.13 5.284 0.000 6.166\n 2365.20 5.393 0.000 6.280\n 2621.27 5.475 0.000 6.368\n 2877.35 5.534 0.000 6.430\n 3133.42 5.569 0.000 6.467\n 3389.50 5.569 0.000 6.467\"\"\"\n )\n f.close()", "def __output_encrypted(self, data, key_len, filename, iv):\n with open(filename, \"w\") as f:\n f.write(START_HEADER + \"\\n\")\n\n key = \"Description\"\n val = \"Crypted file\"\n f.write(self.gen_key_val(key, val))\n\n key = 
\"Method\"\n val = \"AES\"\n f.write(self.gen_key_val(key, val))\n\n key = \"File name\"\n val = filename\n f.write(self.gen_key_val(key, val))\n\n key = \"IV\"\n val = binascii.hexlify(iv)\n f.write(self.gen_key_val(key, val))\n\n key = \"Data\"\n val = base64.b64encode(data)\n # val = data\n f.write(self.gen_key_val(key, val))\n\n f.write(END_HEADER + \"\\n\")", "def test_create_tpm(self):\n command_line = self._MENU + [self._POOLNAME] + self._DEVICES + [\"--clevis=tpm2\"]\n TEST_RUNNER(command_line)", "def _decrypt(self):\n self._outfile = os.path.join(self.dest, self.plain_file)\n self._infile = self.encrypted_file\n self._log.info(\"Decrypting file '%s' to '%s'\", self.encrypted_file, self._outfile)\n with open(self.encrypted_file, \"rb\") as enc_file:\n openssl(\n \"enc\",\n \"-aes-256-cbc\",\n \"-d\",\n \"-pass\",\n \"file:{secret}\".format(secret=self.secret.keyfile),\n _in=enc_file,\n _out=self._outfile,\n )\n self._log.info(\"File '%s' decrypted to '%s'\", self.encrypted_file, self._outfile)\n return True", "def t_xtide(*varargin):\n nargin = len(varargin)\n if nargin > 0:\n varargin = varargin[0]\n if not os.path.exists('t_xtide.mat'):\n # Read the harmonics file and make a mat file\n filnam = '/usr/share/xtide/harmonics.txt'\n fprintf(\"\\\\n********Can't find mat-file t_xtide.mat ********\\\\n\\\\n\")\n fprintf('Attempting to generate one from an xtide harmonics file....\\\\n\\\\n')\n fprintf('Latest version available from http://bel-marduk.unh.edu/xtide/files.html\\\\n\\\\n')\n # Input name\n fid = - 1\n while fid == - 1:\n\n rep = filnam\n while (lower(rep[0]) != 'y'):\n\n filnam = rep\n rep = 'n'\n rep = input_('Harmonics filename: ' + filnam + '? (y/Y/new file name):', 's')\n if (0 in rep.shape):\n rep = 'y'\n\n fid = open(filnam)\n if fid == - 1:\n fprintf(\"\\\\n****** Can't open filename ->\" + filnam + '<-\\\\n\\\\n')\n\n fprintf('Reading harmonics file (this will take a while)\\\\n')\n xtide, xharm = read_xtidefile(fid) # nargout=2\n fprintf('Saving harmonic information to t_xtide.mat\\\\n')\n savemat('t_xtide', 'xtide', 'xharm')\n else:\n loadmat('t_xtide',matlab_compatible=True)\n if nargin > 0:\n if isstr(varargin[0]):\n # Station name given\n # Identify station - look for exact match first\n ista = strmatch(lower(varargin[0]), lower(xharm.station), 'exact')\n # otherwise go for partial matches\n if (0 in ista.shape):\n # First check to see if a number was selected:\n inum = - 10\n while inum < - 1:\n\n inum = inum + 1\n ll = findstr(lower(varargin[0]), sprintf('(\\n %d)', - inum))\n if not (0 in ll.shape):\n inum = abs(inum)\n varargin[0] = deblank(varargin[0](range(1, (ll - 1 +1))))\n\n ista = strmatch(lower(varargin[0]), lower(xharm.station))\n if max(ista.shape) > 1:\n if inum > 0 & inum <= max(ista.shape):\n ista = ista[(inum -1)]\n else:\n fprintf('Ambiguous Station Choice - Taking first of:\\\\n')\n for kk in range(1, (max(ista.shape) +1)):\n fprintf('\\n %5d: \\n %s\\\\n', ista[(kk -1)], deblank(xharm.station(ista[(kk -1)], :)))\n fprintf(' Long: \\n %.4f Lat: \\n %.4f \\\\n', xharm.longitude(ista[(kk -1)]), xharm.latitude(ista[(kk -1)]))\n fprintf('\\\\n')\n ista = ista[0]\n else:\n if max(ista.shape) == 1 & inum > 1:\n fprintf(\"***Can't find variant (\\n %d) of station - Taking only choice\\\\n\", inum)\n else:\n if max(ista.shape) == 0:\n error('Could not match station')\n varargin[0] = np.array([])\n else:\n # Lat/long?\n dist, hdg = t_gcdist(xharm.latitude, xharm.longitude, varargin[1], varargin[0]) # nargout=2\n mind, ista = np.min(dist) # 
nargout=2\n if max(ista.shape) > 1:\n fprintf('Ambiguous Station Choice - Taking first of:\\\\n')\n for kk in range(1, (max(ista.shape) +1)):\n fprintf('\\n %5d: \\n %s\\\\n', ista[(kk -1)], deblank(xharm.station(ista[(kk -1)], :)))\n fprintf(' Long: \\n %.4f Lat: \\n %.4f \\\\n', xharm.longitude(ista[(kk -1)]), xharm.latitude(ista[(kk -1)]))\n fprintf('\\\\n')\n ista = ista[0]\n else:\n fprintf('\\n %5d: \\n %s\\\\n', ista, deblank(xharm.station(ista, :)))\n fprintf(' Long: \\n %.4f Lat: \\n %.4f \\\\n', xharm.longitude(ista), xharm.latitude(ista))\n varargin[0:2] = np.array([])\n # Time vector (if available) otherwise take current time.\n if max(varargin.shape) > 0 & not isstr(varargin[0]):\n tim = varargin[0]\n tim = tim[:].T\n varargin[0] = np.array([])\n if max(tim.shape) == 1:\n if tim < 1000:\n dat = clock\n tim = datenum(dat[0], dat[1], dat[2]) + np.array([range(0, (tim +1), 1 / 48)]).reshape(1, -1)\n else:\n tim = tim + np.array([range(0, 3, 1 / 48)]).reshape(1, -1)\n # 2 days worth.\n else:\n dat = clock\n tim = datenum(dat[0], dat[1], dat[2]) + np.array([range(0, 49, 0.25)]).reshape(1, -1) / 24\n # Parse properties\n format_ = 'raw'\n unt = 'original'\n k = 1\n while max(varargin.shape) > 0:\n\n if 'for' == lower(varargin[-1](range(1, 4))):\n format_ = lower(varargin[1])\n else:\n if 'uni' == lower(varargin[-1](range(1, 4))):\n unt = lower(varargin[1])\n else:\n error(\"Can't understand property:\" + varargin[0])\n varargin[(np.array([1, 2]).reshape(1, -1) -1)] = np.array([])\n\n # if we want a time series\n pred = np.array([])\n # Convert units if requested.\n units, convf = convert_units(unt, xharm.units(ista, :)) # nargout=2\n if format_[0:2] == 'ra' | format_[0:2] == 'fu' | format_[0:2] == 'ti':\n # Data every minute for hi/lo forecasting.\n if format_[0:2] == 'ti':\n tim = range(tim[0], (tim[-1] +1), (1 / 1440))\n # Convert into time since the beginning of year\n mid = datevec(mean(tim))\n iyr = mid[0] - xtide.startyear + 1\n lt = max(tim.shape)\n xtim = np.dot((tim - datenum(mid[0], 1, 1)), 24)\n # Hours since beginning of year\n #-----------------------------------------------------\n # Sum up everything for the prediction!\n pred = xharm.datum(ista) + np.sum(repmat(xtide.nodefactor(:, iyr) * xharm.A(ista, :).T, 1, lt) * cos(np.dot((np.dot(xtide.speed, xtim) + repmat(xtide.equilibarg(:, iyr) - xharm.kappa(ista, :).T, 1, lt)), (pi / 180))), 1)\n #-----------------------------------------------------\n pred = np.dot(pred, convf)\n # Compute times of hi/lo from every-minute data\n if format_[0:2] == 'ti':\n # Check if this is a current station\n if not (0 in findstr('Current', xharm.station(ista, :)).shape):\n currents = 1\n else:\n currents = 0\n dpred = diff(pred)\n ddpred = diff(dpred > 0)\n flat = np.flatnonzero(ddpred != 0) + 1\n slk = np.flatnonzero(sign(pred[0:pred.shape[0] - 1]) != sign(pred[1:pred.shape[0]]))\n hi.mtime = tim[(flat -1)]\n hi.value = pred[(flat -1)]\n hi.type = np.zeros(shape=(flat.shape, flat.shape), dtype='float64')\n hi.type(np.flatnonzero(ddpred[(flat - 1 -1)] < 0)) = 1\n # 0=lo, 1=hi\n hi.units = deblank(units)\n pred = hi\n # Create information structure\n if format_[0:2] == 'in' | format_[0:2] == 'fu':\n if not (0 in pred.shape):\n pred.yout = pred\n pred.mtime = tim\n else:\n kk = np.flatnonzero(xharm.A(ista, :) != 0)\n pred.freq = xtide.name(kk, :)\n pred.A = np.dot(full(xharm.A(ista, kk).T), convf)\n pred.kappa = full(xharm.kappa(ista, kk).T)\n pred.station = deblank(xharm.station(ista, :))\n pred.longitude = xharm.longitude(ista)\n 
pred.latitude = xharm.latitude(ista)\n pred.timezone = xharm.timezone(ista)\n pred.units = deblank(units)\n pred.datum = np.dot(xharm.datum(ista), convf)\n # If no output parameters then we plot or display things\n if nargout == 0:\n if 'ti' == format_[(((0:2 -1) -1) -1)]:\n fprintf('High/Low Predictions for \\n %s\\\\n', xharm.station(ista, :))\n fprintf('Time offset \\n %.1f from UTC\\\\n\\\\n', xharm.timezone(ista))\n outstr = repmat(' ', max(flat.shape), 41)\n outstr[:, 0:20] = datestr(hi.mtime)\n outstr[:, 21:27] = reshape(sprintf('\\n %6.2f', hi.value), 6, max(flat.shape)).T\n if currents:\n ll = hi.type == 1\n outstr[(ll -1), 30:41] = repmat(' Flood Tide', np.sum(ll), 1)\n ll = hi.type == 0\n outstr[(ll -1), 30:41] = repmat(' Ebb Tide ', np.sum(ll), 1)\n else:\n ll = hi.type == 1\n outstr[(ll -1), 30:41] = repmat(' High Tide ', np.sum(ll), 1)\n ll = hi.type == 0\n outstr[(ll -1), 30:41] = repmat(' Low Tide ', np.sum(ll), 1)\n disp(outstr)\n else:\n if 'ra' == format_[(((0:2 -1) -1) -1)]:\n plot(tim, pred)\n datetick\n title('Tidal prediction for ' + deblank(xharm.station(ista, :)) + ' beginning ' + datestr(tim[0]))\n ylabel(deblank(xharm.units(ista, :)))\n else:\n if 'fu' == format_[(((0:2 -1) -1) -1)]:\n plot(tim, pred.yout)\n datetick\n title('Tidal prediction for ' + deblank(xharm.station(ista, :)) + ' beginning ' + datestr(tim[0]))\n ylabel(deblank(xharm.units(ista, :)))\n else:\n if 'in' == format_[(((0:2 -1) -1) -1)]:\n fprintf('Station: \\n %s\\\\n', pred.station)\n if pred.longitude < 0:\n lon = 'W'\n else:\n lon = 'E'\n if pred.latitude < 0:\n lat = 'S'\n else:\n lat = 'N'\n fprintf(\"Location: \\n %d \\n %.1f' \\n %c, \\n %d \\n %.1f' \\n %c\\\\n\", fix(abs(pred.latitude)), np.dot(rem(abs(pred.latitude), 1), 60), lat, fix(abs(pred.longitude)), np.dot(rem(abs(pred.longitude), 1), 60), lon)\n fprintf('Time offset \\n %.1f from UTC\\\\n\\\\n', pred.timezone)\n clear('pred')\n #\n return pred", "def decryptor(infile: str, outfile: str, password: str, mode: str) -> int:\n\n dec = Decrypt(infile)\n\n if mode.upper() == 'AES':\n decrypted_data = dec.AES(password)\n elif mode.upper() == 'DES':\n decrypted_data = dec.DES(password)\n elif mode.upper() == 'SALSA20':\n decrypted_data = dec.Salsa20(password)\n else:\n return 2\n\n if not decrypted_data:\n cleanup(outfile)\n return 3\n\n if not outfile.endswith(dec.extension):\n outfile += dec.extension\n write_data(decrypted_data, outfile)\n return 0", "def decryptor(file_name, key):\n\twith open(file_name, 'rb') as dfile:\n\t\tciphertext = dfile.read()\n\t\tdec = decrypt(key, ciphertext)\n\t\tdfile.close()\n\t\tdtext = \"The encrypted file was opened by macupdate.py by the user: \"\n\t\tcreateLog(dtext, 'logs/macupdate.log')\n\t\treturn dec", "def _dK_ode_dtheta(self, target):\r\n t_ode = self._t[self._index>0]\r\n dL_dK_ode = self._dL_dK[self._index>0, :]\r\n index_ode = self._index[self._index>0]-1\r\n if self._t2 is None:\r\n if t_ode.size==0:\r\n return \r\n t2_ode = t_ode\r\n dL_dK_ode = dL_dK_ode[:, self._index>0]\r\n index2_ode = index_ode\r\n else:\r\n t2_ode = self._t2[self._index2>0]\r\n dL_dK_ode = dL_dK_ode[:, self._index2>0]\r\n if t_ode.size==0 or t2_ode.size==0:\r\n return\r\n index2_ode = self._index2[self._index2>0]-1\r\n\r\n h1 = self._compute_H(t_ode, index_ode, t2_ode, index2_ode, stationary=self.is_stationary, update_derivatives=True)\r\n #self._dK_ddelay = self._dh_ddelay\r\n self._dK_dsigma = self._dh_dsigma\r\n\r\n if self._t2 is None:\r\n h2 = h1\r\n else:\r\n h2 = self._compute_H(t2_ode, index2_ode, t_ode, 
index_ode, stationary=self.is_stationary, update_derivatives=True)\r\n\r\n #self._dK_ddelay += self._dh_ddelay.T\r\n self._dK_dsigma += self._dh_dsigma.T\r\n # C1 = self.sensitivity\r\n # C2 = self.sensitivity\r\n\r\n # K = 0.5 * (h1 + h2.T)\r\n # var2 = C1*C2\r\n # if self.is_normalized:\r\n # dk_dD1 = (sum(sum(dL_dK.*dh1_dD1)) + sum(sum(dL_dK.*dh2_dD1.T)))*0.5*var2\r\n # dk_dD2 = (sum(sum(dL_dK.*dh1_dD2)) + sum(sum(dL_dK.*dh2_dD2.T)))*0.5*var2\r\n # dk_dsigma = 0.5 * var2 * sum(sum(dL_dK.*dK_dsigma))\r\n # dk_dC1 = C2 * sum(sum(dL_dK.*K))\r\n # dk_dC2 = C1 * sum(sum(dL_dK.*K))\r\n # else:\r\n # K = np.sqrt(np.pi) * K\r\n # dk_dD1 = (sum(sum(dL_dK.*dh1_dD1)) + * sum(sum(dL_dK.*K))\r\n # dk_dC2 = self.sigma * C1 * sum(sum(dL_dK.*K))\r\n\r\n\r\n # dk_dSim1Variance = dk_dC1\r\n # Last element is the length scale.\r\n (dL_dK_ode[:, :, None]*self._dh_ddelay[:, None, :]).sum(2)\r\n\r\n target[-1] += (dL_dK_ode*self._dK_dsigma/np.sqrt(2)).sum()\r\n\r\n\r\n # # only pass the gradient with respect to the inverse width to one\r\n # # of the gradient vectors ... otherwise it is counted twice.\r\n # g1 = real([dk_dD1 dk_dinvWidth dk_dSim1Variance])\r\n # g2 = real([dk_dD2 0 dk_dSim2Variance])\r\n # return g1, g2\"\"\"\r", "def digital_temp_data(self): # This function will give the initial digital format for temperature data \n self._bus.write_byte(self._addr, 0x58) \n time.sleep(0.05) \n tempadcbytes = self._bus.read_i2c_block_data(self._addr, 0x00) \n time.sleep(0.05) \n self.tempadc=tempadcbytes[0]*65536.0+tempadcbytes[1]*256.0+tempadcbytes[2]", "def example():\r\n path = os.path.abspath(os.path.dirname(__name__))\r\n module = CryptoModule()\r\n # create_name this is open source py module with confidential information\r\n opened_path = os.path.join(path, 'secret.py')\r\n # read_name this is open encrypted py module with confidential information\r\n secured_path = os.path.join(path, 'secured.py')\r\n # encrypt, read secret.py and create secured.py\r\n module.create_secured_module(path_to_opened_module=opened_path, path_to_secured_module=secured_path,\r\n create_key=True, delete_source_opened_module=False)\r\n # decrypt, read secured.py and create opened.py\r\n module.create_opened_module(path_to_secured_module=secured_path, path_to_opened_module=opened_path)\r\n print('ok')", "def test_tte1(self):\n filename = str(self.temp_j2k_filename)\n self.xtx1_setup(filename)", "def Elevate(self):\n self.Send(self.EncryptString('elevate\\n'))\n print self.DecryptString(self.Recv(4096))\n\n self.Send(self.EncryptString(self.flag_2))\n print self.DecryptString(self.Recv(4096))\n\n self.Send(self.EncryptString('RocketDonkey\\n'))\n print self.DecryptString(self.Recv(4096))", "def detx(self, det_id, t0set=None, calibration=None):\n url = 'detx/{0}?'.format(det_id) # '?' 
since it's ignored if no args\n if t0set is not None:\n url += '&t0set=' + t0set\n if calibration is not None:\n url += '&calibrid=' + calibration\n\n detx = self._get_content(url)\n return detx", "def unfreeze_rotation(self):\n self.android_device_driver.adb.exec_adb_cmd(\n \"shell content insert --uri content://settings/system --bind name:s:accelerometer_rotation --bind value:i:1\"\n ).wait()", "def main():\n \n # Ask for their option.\n \n inputFile = \"\"\n outputFile = \"\"\n \n choice = askOption()\n key = askForKey()\n \n inputFile = askInputFile()\n inputText = readText(inputFile)\n \n outputFile = askOutputFile()\n \n #Start the timer here.\n startTimer = time.time()\n \n # Depending on their choice, encode or decode.\n if choice == 'e':\n encryptedText = RouteCipher.encrypt(inputText, key)\n writeText(encryptedText, outputFile)\n elif choice == 'd':\n decryptedText = RouteCipher.decrypt(inputText, key)\n writeText(decryptedText, outputFile)\n \n finishTimer = time.time()\n totalTime = round(finishTimer - startTimer, 2)\n \n print(\"The operation was succesful\")\n print(f\"Total time needed: {totalTime}\")", "def StoreAntirollback(now, ar_filename, kern_f):\n print 'antirollback time now ' + str(now)\n sys.stdout.flush()\n kern_f.write(str(now))\n kern_f.flush()\n tmpdir = os.path.dirname(ar_filename)\n with tempfile.NamedTemporaryFile(mode='w', dir=tmpdir, delete=False) as f:\n f.write(str(now) + '\\n')\n f.flush()\n os.fsync(f.fileno())\n os.rename(f.name, ar_filename)", "def test_dfunction_saveable(self):\n \n wa = FrequencyAxis(0.0, 1000, 0.1)\n \n fw = numpy.exp(-wa.data)\n \n fce = DFunction(wa,fw)\n \n #fce.plot()\n\n #with h5py.File(\"test_file_1\",driver=\"core\", \n # backing_store=False) as f:\n with tempfile.TemporaryFile() as f:\n \n fce.save(f, test=True)\n \n fce2 = DFunction()\n fce2 = fce2.load(f, test=True)\n \n #fce2.plot()\n \n numpy.testing.assert_array_equal(fce.data, fce2.data)", "def vmdexec(cmds):\n handle,filename=mkstemp(dir='/tmp')\n open(filename,'w').write(cmds)\n os.system('vmd -dispdev text -e %s'%filename) # run vmd in the terminal\n os.system('/bin/rm %s'%filename) # clean-up", "def teleopInit(self):\n # self.drive.setSafetyEnabled(True)\n self.compressor.start()\n pass", "def construct_TDI(self, t, Orbit):\n\t\n\tself.make_padded_delta_l(t)\n\n\tp12 = td.Phase(1,2, t, self.delta_l_padded[0,1,:])\n\tp21 = td.Phase(2,1, t, self.delta_l_padded[1,0,:])\n\n\tp13 = td.Phase(1,3, t, self.delta_l_padded[0,2,:])\n\tp31 = td.Phase(3,1, t, self.delta_l_padded[2,0,:])\n\n\tp23 = td.Phase(2,3, t, self.delta_l_padded[1,2,:])\n\tp32 = td.Phase(3,2, t, self.delta_l_padded[2,1,:])\n \n\tp12.FT_phase(Orbit)\n\tp21.FT_phase(Orbit)\n\tp13.FT_phase(Orbit)\n\tp31.FT_phase(Orbit)\n\tp23.FT_phase(Orbit)\n\tp32.FT_phase(Orbit)\n\n\ttdi_GW = td.TDI(p12, p21, p13, p31, p23, p32, Orbit)\n\t\n\treturn tdi_GW", "async def describe_dbinstance_tdeinfo_with_options_async(\n self,\n request: dds_20151201_models.DescribeDBInstanceTDEInfoRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceTDEInfoResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = 
request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceTDEInfo',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstanceTDEInfoResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def dftb_dftd3(third_ord, damp_flag, damp_exp):\n dftb_dftd3=\"\"\"\n ThirdOrderFull = {{ third_ord }}\n DampXH = {{ damp_flag }}\n DampXHExponent = {{ damp_exp }}\n Dispersion = DftD3{}\n}\n \"\"\"\n return Environment().from_string(dftb_dftd3).render(third_ord=third_ord, damp_flag=damp_flag, damp_exp=damp_exp)", "def test_encrypt_decrypt(self):\n reference = get_random_test_tensor()\n encrypted_tensor = SharedTensor(reference)\n self._check(encrypted_tensor, reference, 'en/decryption failed')", "def test_default_run_ubuntu_keep_vdmx():\n test_dir = os.path.join(\"tests\", \"test_files\", \"fonts\", \"temp\")\n notouch_inpath = os.path.join(\"tests\", \"test_files\", \"fonts\", \"Ubuntu-Regular.ttf\")\n test_inpath = os.path.join(\n \"tests\", \"test_files\", \"fonts\", \"temp\", \"Ubuntu-Regular.ttf\"\n )\n test_outpath = os.path.join(\n \"tests\", \"test_files\", \"fonts\", \"temp\", \"Ubuntu-Regular-dehinted.ttf\"\n )\n test_args = [test_inpath, \"--keep-vdmx\"]\n\n # setup\n if os.path.isdir(test_dir):\n shutil.rmtree(test_dir)\n os.mkdir(test_dir)\n shutil.copyfile(notouch_inpath, test_inpath)\n\n # execute\n run(test_args)\n\n # test\n tt = TTFont(test_outpath)\n assert \"VDMX\" in tt\n\n # tear down\n shutil.rmtree(test_dir)", "def direct_mode_test(self,earfcn,bwMhz,powerdBm,ud_config,sf_sweep=False,with_rx=False):\r\r\n\r\r\n self.meas_list = ['FREQ_ERR','IQ_OFFSET', 'EVM']\r\r\n tol_dB = 1\r\r\n\r\r\n bursted = self.setup_tdd(earfcn,bwMhz,powerdBm,ud_config,with_rx=with_rx)\r\r\n\r\r\n self.GetTesterMeasurments(exp_powerdBm=powerdBm,\r\r\n tol_dB=tol_dB)\r\r\n\r\r\n # Note - Direct AGC value leads to different powers on different platforms\r\r\n # -- use driver mode and read back AGC value to get baseline,\r\r\n # then try that value in direct mode.\r\r\n dac_value = self.modemObj.query_txagc()\r\r\n\r\r\n # Set minimum power\r\r\n self.modemObj.set_txagc_dbm(value=-70)\r\r\n self.GetTesterMeasurments(exp_powerdBm=-9999,\r\r\n tol_dB=0,\r\r\n exp_no_signal=True)\r\r\n\r\r\n # Set the original power, but as a direct gain DAC word this time.\r\r\n self.modemObj.set_txagc_direct(value=dac_value)\r\r\n\r\r\n sf_sweep = bursted and sf_sweep\r\r\n meas_sf_list = range(10) if sf_sweep else [2] # 2 is always UL\r\r\n for meas_sf in meas_sf_list:\r\r\n self.set_meas_sf(meas_sf)\r\r\n self.instr.lte_tx.conf_measurement_subframe(measSubframe=meas_sf)\r\r\n\r\r\n if sf_is_uplink(ud_config, meas_sf):\r\r\n self.GetTesterMeasurments(exp_powerdBm=powerdBm,\r\r\n tol_dB=tol_dB)\r\r\n else:\r\r\n # Non-UL subframe, do not expect signal\r\r\n self.GetTesterMeasurments(exp_powerdBm=-9999,\r\r\n tol_dB=0,\r\r\n exp_no_signal=True)\r\r\n\r\r\n meas_sf = 2\r\r\n self.set_meas_sf(meas_sf)\r\r\n self.instr.lte_tx.conf_measurement_subframe(measSubframe=meas_sf)\r\r\n\r\r\n # Check going back to driver 
mode\r\r\n self.modemObj.set_txagc_dbm(value=-70)\r\r\n self.GetTesterMeasurments(exp_powerdBm=-9999,\r\r\n tol_dB=0,\r\r\n exp_no_signal=True)\r\r\n self.modemObj.set_txagc_dbm(value=powerdBm)\r\r\n self.GetTesterMeasurments(exp_powerdBm=powerdBm,\r\r\n tol_dB=tol_dB)", "def test_tdwr():\n f = Level3File(get_test_data('nids/Level3_SLC_TV0_20160516_2359.nids'))\n assert f.prod_desc.prod_code == 182", "def main():\n print(\"Reading from config.json\")\n download_decrypt_store = DownloadDecryptStore()\n print(\"Downloading key from storage-bucket\")\n file_path = download_decrypt_store.download_key_from_blob()\n print(\"Decrypting downloaded file\")\n download_decrypt_store.decrypt_from_file(file_path)\n print(\"Completed\")", "def de_cryptate(self):\r\n\r\n intab1 = \"abcdefghijklomnopqrstuvwxyz !,.\"\r\n outtab1 = \"?2p=o)7i(u9/y&t3%r¤5e#w1q!>*'^;)\"\r\n# Fetching from written in textbox\r\n s = self.textbox.toPlainText()\r\n a = s.lower()\r\n# Changing out the letters/numbers/etc\r\n crypted = (a.translate({ord(x): y for (y, x) in zip(intab1, outtab1)}))\r\n# Clear the textbox\r\n self.textbox.clear()\r\n# Write the Decrypted text\r\n self.textbox.setPlainText(crypted)", "def describe_dbinstance_tdeinfo_with_options(\n self,\n request: dds_20151201_models.DescribeDBInstanceTDEInfoRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceTDEInfoResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceTDEInfo',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstanceTDEInfoResponse(),\n self.call_api(params, req, runtime)\n )", "def test_tte5(self):\n filename = str(self.temp_j2k_filename)\n xtx5_setup(filename)\n self.assertTrue(True)", "def configure_disable_aes_encryption(device):\n dialog = Dialog(\n [\n Statement(\n pattern=r\".*Continue\\s*with\\s*master\\s*key\\s*deletion.*\",\n action=\"sendline(yes)\",\n loop_continue=True,\n continue_timer=False,\n )\n ]\n )\n try:\n device.configure(\"no key config-key password-encrypt\", reply=dialog)\n device.configure(\"no password encryption aes\")\n except SubCommandFailure as e:\n raise SubCommandFailure(\n \"Could not remove aes password encryption on {device}.\\nError: {e}\".format(\n device=device.name, e=str(e))\n )", "def do_android_decryption(self):\r\n self.aes_decryption_key = self.extract_aes_key()\r\n self.decrypt_device_file()\r\n # join is optimized and does not cause O(n^2) total memory copies.\r\n self.decrypted_file = b\"\\n\".join(self.good_lines)", "def encrypt(self,timerPrinting=False):\n\n\t\tt = time.time() \n\t\tif self.extension not in 
self.path:\n\t\t\twith open(self.path,'rb') as infile:\n\t\t\t\tfile_data = infile.read()\n\t\t\t#Start To CHecking The PlatForm\n\t\t\t# if platform.system() == \"Windows\":\n\t\t\t# \tself.path_dir = self.path.split(\"\\\\\")[-1]\n\t\t\t# elif platform.system() == \"Linux\":\n\t\t\t# \tself.path_dir = self.path.split('/')[-1]\n\t\t\t# #End Checking Wich Platform\n\t\t\t# print('Encryption of '+self.path_dir+'...')\n\t\t\t# print('It\\'s may take a will')\n\t\t\t################################### Blowfish Algorithm ##############################\n\t\t\tbs = Blowfish.block_size\n\t\t\tiv = Random.new().read(bs)\n\t\t\tpadding = b\"}\"\n\t\t\tp = lambda s: s+(bs - len(s) % bs )*padding\n\t\t\tc= Blowfish.new(self.key, Blowfish.MODE_CBC, iv)\n\t\t\tencrypt = iv + c.encrypt(p(file_data))\n\t\t\tself.encrypt = base64.b64encode(encrypt) \n\t\t\t################################################################\n\t\t\t#print(\"writing in your file ...\")\n\t\t\tos.remove(self.path)\n\t\t\twith open(self.path + self.extension,\"wb\") as newfile:\n\t\t\t\tnewfile.write(self.encrypt)\n\t\t\tif timerPrinting:\n\t\t\t\tprint('Done In '+ time.time() -t)\n\t\telse:\n\t\t\tprint('The File is already encrypt.')", "def test_encryption_cycle_default_algorithm_non_framed_no_encryption_context(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"], key_provider=self.kms_master_key_provider, frame_length=0\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]", "def encryptor(file_name, key, plaintext):\n\twith open(file_name, 'w') as efile:\n\t\tenc = encrypt(key, plaintext)\n\t\tefile.write(enc)\n\t\tefile.close()\n\t\tetext = \"An encrypted passfile was created named key.enc for further use in this script by the user: \"\n\t\tcreateLog(etext, 'logs/macupdate.log')", "def disable_tee(self):\n self._tee = False", "def test_storage_truncation(tmp_path):\n file = tmp_path / \"test_storage_truncation.hdf5\"\n for truncate in [True, False]:\n storages = [MemoryStorage()]\n if module_available(\"h5py\"):\n storages.append(FileStorage(file))\n tracker_list = [s.tracker(interval=0.01) for s in storages]\n\n grid = UnitGrid([8, 8])\n state = ScalarField.random_uniform(grid, 0.2, 0.3)\n eq = DiffusionPDE()\n\n eq.solve(state, t_range=0.1, dt=0.001, tracker=tracker_list)\n if truncate:\n for storage in storages:\n storage.clear()\n eq.solve(state, t_range=[0.1, 0.2], dt=0.001, tracker=tracker_list)\n\n times = np.arange(0.1, 0.201, 0.01)\n if not truncate:\n times = np.r_[np.arange(0, 0.101, 0.01), times]\n for storage in storages:\n msg = f\"truncate={truncate}, storage={storage}\"\n np.testing.assert_allclose(storage.times, times, err_msg=msg)\n\n if any(platform.win32_ver()):\n for storage in storages:\n if isinstance(storage, FileStorage):\n storage.close()\n\n assert not storage.has_collection", "def change_TTS_engine(self):\n\t\t\n\t\tif self.isActiveDualTTS:\n\t\t\t#dual TTS\n\t\t\tpath_to_file = '/var/lib/snips/skills/snips_app_pilldispenser/settings/dual_TTS.sh'\n\t\t\tsubprocess.call([path_to_file])\n\t\t\tprint('Dual TTS is enabled. Using Amazon Polly TTS in case of internet connection, else use offline Picotts TTS.')\n\t\t\t\n\t\telse:\n\t\t\t#go back to single offline Picotts TTS\n\t\t\tpath_to_file = '/var/lib/snips/skills/snips_app_pilldispenser/settings/single_TTS.sh'\n\t\t\tsubprocess.call([path_to_file])\n\t\t\tprint('Dual TTS is disabled. 
Using offline Picotts TTS regardless of internect connection.')", "def test_decryption(d, c):\n\n#\td = int(raw_input(\"\\nEnter d from public key\\n\"))\n#\tc = int(raw_input(\"\\nEnter c from public key\\n\"))\n\n x = int(raw_input(\"\\nEnter number to decrypt\\n\"))\n decode(endecrypt(x, d, c))", "def test_secretbox_enc_dec(self):\n # Encrypt with sk\n encrypted_data = nacl.secretbox_encrypt(data=self.unencrypted_data, sk=self.sk)\n\n # Decrypt with sk\n decrypted_data = nacl.secretbox_decrypt(data=encrypted_data, sk=self.sk)\n\n self.assertEqual(self.unencrypted_data, decrypted_data)", "def test_ttd1(self):\n filename = str(self.temp_j2k_filename)\n\n # Produce the tte0 output file for ttd0 input.\n self.xtx1_setup(filename)\n\n kwargs = {'x0': 0,\n 'y0': 0,\n 'x1': 128,\n 'y1': 128,\n 'filename': filename,\n 'codec_format': openjp2.CODEC_J2K}\n tile_decoder(**kwargs)\n self.assertTrue(True)", "def test_ttd0(self):\n filename = str(self.temp_j2k_filename)\n ttx0_setup(filename)\n\n kwargs = {'x0': 0,\n 'y0': 0,\n 'x1': 1000,\n 'y1': 1000,\n 'filename': filename,\n 'codec_format': openjp2.CODEC_J2K}\n tile_decoder(**kwargs)\n self.assertTrue(True)", "def freeze_rotation(self):\n self.android_device_driver.adb.exec_adb_cmd(\n \"shell content insert --uri content://settings/system --bind name:s:accelerometer_rotation --bind value:i:0\"\n ).wait()", "def setupForFTK(self):\n t1 = self.getKeyword('ISS CONF T1NAME').strip()\n t2 = self.getKeyword('ISS CONF T2NAME').strip()\n #swapped = self.getKeyword('ISS PRI STS'+t1[2]+' GUIDE_MODE').strip()\n\n fsub_pos_fri = self.maxSnrInScan(fsu='FSUB', opdc='OPDC', plot=1)\n fsua_pos_fri = self.maxSnrInScan(fsu='FSUA', opdc='OPDC', plot=2)\n print '---{'+self.insmode+'}---'\n if swapped == 'NORMAL':\n print ' OPDC -> [ZPD offset] x [sign',self.DLtrack,\\\n '] =',-fsub_pos_fri\n print 'DOPDC -> [ZPD offset] x [sign',\\\n self.getKeyword('ISS DDL1 NAME').strip(),\\\n '] = ',(fsub_pos_fri-fsua_pos_fri)\n else:\n print ' OPDC -> [ZPD offset] x [sign',self.DLtrack,\\\n '] =', fsua_pos_fri\n print 'DOPDC -> [ZPD offset] x [sign',\\\n self.getKeyword('ISS DDL2 NAME').strip(),\\\n '] = ',(fsua_pos_fri-fsub_pos_fri)\n return", "def setup_tdelta(self, dir1: str, num1: int, pos1: str, dir2: str, num2: int, pos2: str) -> None:\n cmd = ':measure:define deltatime,{0},{1},{2},{3},{4},{5}'.format(dir1, num1, pos1, dir2, num2, pos2)\n self.write(cmd)", "def decrypt(self, payload):\r\n\r\n #print(b'payload: %s'%(payload))\r\n decrypt1 = aes(self.ivkey, 2, self.staticiv)\r\n iv = decrypt1.decrypt(b'%s'%(payload['eiv']))\r\n #print(b'iv : %s'%(iv))\r\n decrypt2 = aes(b'%s'%(self.datakey), 2, b'%s'%(iv))\r\n temp = decrypt2.decrypt(b'%s'%(payload['ed']))\r\n #print(b'data : %s'%(temp))\r\n x_accel = int.from_bytes(temp[:4],\"big\")\r\n y_accel = int.from_bytes(temp[4:8],\"big\")\r\n z_accel = int.from_bytes(temp[8:12],\"big\")\r\n temp = float(temp[12:])\r\n print(x_accel,y_accel,z_accel,temp)\r\n temp1 = dict()\r\n \r\n temp1[\"value1\"] = str(x_accel)\r\n temp1[\"value2\"] = str(y_accel)\r\n temp1[\"value3\"] = str(z_accel)\r\n urequests.request(\"POST\", \"http://maker.ifttt.com/trigger/Spinner2/with/key/c6aKBXblAZ9tkL3Vu9tIlr\", json=temp1, headers={\"Content-Type\": \"application/json\"})\r\n temp1[\"value1\"] = str(temp)\r\n temp1[\"value2\"] = str(self.nodeid)\r\n temp1[\"value3\"] = str(self.sessionID)\r\n urequests.request(\"POST\", \"http://maker.ifttt.com/trigger/Spinner2/with/key/c6aKBXblAZ9tkL3Vu9tIlr\", json=temp1, headers={\"Content-Type\": 
\"application/json\"})\r\n temp1[\"value1\"] = ''\r\n temp1[\"value2\"] = ''\r\n temp1[\"value3\"] = ''\r\n urequests.request(\"POST\", \"http://maker.ifttt.com/trigger/Spinner2/with/key/c6aKBXblAZ9tkL3Vu9tIlr\", json=temp1, headers={\"Content-Type\": \"application/json\"})\r\n \r\n if self.x_accel == None or self.y_accel == None or self.z_accel == None:\r\n self.x_accel = x_accel\r\n self.y_accel = y_accel\r\n self.z_accel = z_accel\r\n \r\n elif abs(self.x_accel - x_accel) > 30 or abs(self.y_accel - y_accel) > 30 or abs(self.z_accel - z_accel) > 30:\r\n self.R_LED.value(1)\r\n self.x_accel = x_accel\r\n self.y_accel = y_accel\r\n self.z_accel = z_accel\r\n \r\n else:\r\n self.R_LED.value(0)\r\n self.x_accel = x_accel\r\n self.y_accel = y_accel\r\n self.z_accel = z_accel\r\n \r\n if self.temp == None:\r\n self.temp = temp\r\n \r\n elif abs(self.temp - temp) < 1:\r\n self.G_LED.freq(10)\r\n elif abs(self.temp - temp) >= 1:\r\n if 10 + (5 * int(temp - self.temp)) < 0:\r\n self.G_LED.freq(0)\r\n elif temp - self.temp <= -1:\r\n self.G_LED.freq(10 + (5 * int(temp - self.temp)))\r\n else:\r\n self.G_LED.freq(10 + (5 * int(temp - self.temp)))\r\n \r\n return \"Successful Decryption\"", "def fetch_taiwan_ntu_dsi():\n dipy_home = pjoin(os.path.expanduser('~'), '.dipy')\n uraw = 'http://dl.dropbox.com/u/2481924/taiwan_ntu_dsi.nii.gz'\n ubval = 'http://dl.dropbox.com/u/2481924/tawian_ntu_dsi.bval'\n ubvec = 'http://dl.dropbox.com/u/2481924/taiwan_ntu_dsi.bvec'\n ureadme = 'http://dl.dropbox.com/u/2481924/license_taiwan_ntu_dsi.txt'\n folder = pjoin(dipy_home, 'taiwan_ntu_dsi')\n\n md5_list = ['950408c0980a7154cb188666a885a91f', # data\n '602e5cb5fad2e7163e8025011d8a6755', # bval\n 'a95eb1be44748c20214dc7aa654f9e6b', # bvec\n '7fa1d5e272533e832cc7453eeba23f44'] # license\n\n url_list = [uraw, ubval, ubvec, ureadme]\n fname_list = ['DSI203.nii.gz', 'DSI203.bval', 'DSI203.bvec', 'DSI203_license.txt']\n\n if not os.path.exists(folder):\n print('Creating new directory %s' % folder)\n os.makedirs(folder)\n print('Downloading raw DSI data (91MB)...')\n\n for i in range(len(md5_list)):\n _get_file_data(pjoin(folder, fname_list[i]), url_list[i])\n check_md5(pjoin(folder, fname_list[i]), md5_list[i])\n\n print('Done.')\n print('Files copied in folder %s' % folder)\n print('See DSI203_license.txt for LICENSE.')\n print('For the complete datasets please visit :')\n print('http://dsi-studio.labsolver.org')\n\n else:\n print('Dataset is already in place. If you want to fetch it again, please first remove the folder %s ' % folder)", "def DirDE():\n\n global Asm\n\n if dec.Asm.Parse_Pointer == 0:\n # No parameter given\n errors.DoError('missoper', False)\n dec.Asm.New_Label = ''\n return\n\n register = -1\n reg = assem.GetWord().upper()\n if (len(reg) == 2 or len(reg) == 3) and reg[0] == 'R':\n # Can it be a register name? 
Must be 2 or 3 chars long and start with R\n reg = reg[1:]\n if reg.isdigit:\n # The register number must be numeric of course\n if len(reg) == 1 or reg[0] != '0':\n # It is numeric, without a leading 0\n register = int(reg)\n if register < 0 or register > 31:\n # It is not a legal register\n errors.DoError('badoper', False)\n dec.Asm.New_Label = ''\n else:\n # It is a legal register, set it's value\n dec.Asm.BOL_Address = register\n dec.Asm.List_Address = register\n dec.Asm.Mnemonic = '.SE' # Handle rest like .SE\n\n # Ignore more parameters this time (like .EQ).", "def kv_esx_init():\n disk_lib_init()", "def test_once(config, qemu_img=False):\n\n iotests.log(\"# ================= %s %s =================\" % (\n \"qemu-img\" if qemu_img else \"dm-crypt\", config))\n\n oneKB = 1024\n oneMB = oneKB * 1024\n oneGB = oneMB * 1024\n oneTB = oneGB * 1024\n\n # 4 TB, so that we pass the 32-bit sector number boundary.\n # Important for testing correctness of some IV generators\n # The files are sparse, so not actually using this much space\n image_size = 4 * oneTB\n if qemu_img:\n iotests.log(\"# Create image\")\n qemu_img_create(config, image_size / oneMB)\n else:\n iotests.log(\"# Create image\")\n create_image(config, image_size / oneMB)\n\n lowOffsetMB = 100\n highOffsetMB = 3 * oneTB / oneMB\n\n try:\n if not qemu_img:\n iotests.log(\"# Format image\")\n cryptsetup_format(config)\n\n for slot in config.active_slots()[1:]:\n iotests.log(\"# Add password slot %s\" % slot)\n cryptsetup_add_password(config, slot)\n\n # First we'll open the image using cryptsetup and write a\n # known pattern of data that we'll then verify with QEMU\n\n iotests.log(\"# Open dev\")\n cryptsetup_open(config)\n\n try:\n iotests.log(\"# Write test pattern 0xa7\")\n qemu_io_write_pattern(config, 0xa7, lowOffsetMB, 10, dev=True)\n iotests.log(\"# Write test pattern 0x13\")\n qemu_io_write_pattern(config, 0x13, highOffsetMB, 10, dev=True)\n finally:\n iotests.log(\"# Close dev\")\n cryptsetup_close(config)\n\n # Ok, now we're using QEMU to verify the pattern just\n # written via dm-crypt\n\n iotests.log(\"# Read test pattern 0xa7\")\n qemu_io_read_pattern(config, 0xa7, lowOffsetMB, 10, dev=False)\n iotests.log(\"# Read test pattern 0x13\")\n qemu_io_read_pattern(config, 0x13, highOffsetMB, 10, dev=False)\n\n\n # Write a new pattern to the image, which we'll later\n # verify with dm-crypt\n iotests.log(\"# Write test pattern 0x91\")\n qemu_io_write_pattern(config, 0x91, lowOffsetMB, 10, dev=False)\n iotests.log(\"# Write test pattern 0x5e\")\n qemu_io_write_pattern(config, 0x5e, highOffsetMB, 10, dev=False)\n\n\n # Now we're opening the image with dm-crypt once more\n # and verifying what QEMU wrote, completing the circle\n iotests.log(\"# Open dev\")\n cryptsetup_open(config)\n\n try:\n iotests.log(\"# Read test pattern 0x91\")\n qemu_io_read_pattern(config, 0x91, lowOffsetMB, 10, dev=True)\n iotests.log(\"# Read test pattern 0x5e\")\n qemu_io_read_pattern(config, 0x5e, highOffsetMB, 10, dev=True)\n finally:\n iotests.log(\"# Close dev\")\n cryptsetup_close(config)\n finally:\n iotests.log(\"# Delete image\")\n delete_image(config)\n print", "def operate_cipher(self):", "def produce_13TeV_template(tag_name=\"HKHI\"):\n num_rebin = 1\n file_name = \"inputs/BkgEstimation_Lin/BkgEstimation_NONE_TOPO_PTDEP_\"+tag_name+\"_Lin.root\"\n print \"Input: \", file_name\n fin = ROOT.TFile.Open(file_name, \"read\")\n h_nom = fin.Get(\"bkg_total_gg_full\").Clone(\"bkg_nominal_old\")\n h_nom.Rebin(num_rebin)\n fout = 
ROOT.TFile.Open(\"hists_input_\"+tag_name+\".root\", \"recreate\")\n\n h_purity_sys = fin.Get(\"bkg_purity_syst_gg_full\").Clone(\"bkg_purity_syst_gg\")\n h_reducible_sys = fin.Get(\"bkg_reducible_syst_gg_full\").Clone(\"bkg_reducible_syst_gg\")\n h_irreducible_sys = fin.Get(\"bkg_irreducible_syst_gg_full\").Clone(\"bkg_irreducible_syst_gg\")\n h_iso_sys = fin.Get(\"bkg_iso_syst_gg_full\").Clone(\"bkg_iso_syst_gg\")\n\n #file_iso = \"isolation_sys/hist.root\"\n #fin2 = ROOT.TFile.Open(file_iso, \"read\")\n #h_iso_sys = fin2.Get(\"bkg_isolation_syst_gg\")\n ## inflat irreducible uncertainty by factor of 10\n # so that it closes to stats uncertainty in data\n sf = 1\n if INFLATE_SYS:\n sf = 10\n\n # after rebinning systematic uncertainties, need to scale down,\n # otherwise the uncertainties are inflated.\n h_purity_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_irreducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_reducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_iso_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n\n ## truncate the histograms to [200, 2000] GeV\n h_nom_new = truncate_hist(h_nom, \"bkg_nominal\")\n h_purity_sys_new = truncate_hist(h_purity_sys, \"h_purity_sys_new\")\n h_irreducible_sys_new = truncate_hist(h_irreducible_sys, \"h_irreducible_sys_new\")\n h_reducible_sys_new = truncate_hist(h_reducible_sys, \"h_reducible_sys_new\")\n h_iso_sys_new = truncate_hist(h_iso_sys, \"h_iso_sys_new\")\n\n #write down sys and nominal\n fout.cd()\n h_nom_new.Write()\n h_purity_sys_new.Write()\n h_reducible_sys_new.Write()\n h_irreducible_sys_new.Write()\n h_iso_sys_new.Write()\n\n h_purity_up, h_purity_down = create_sys_hist(h_nom_new, h_purity_sys_new, \"purity_sys\")\n h_purity_up.Write()\n h_purity_down.Write()\n\n h_red_up, h_red_down = create_sys_hist(h_nom_new, h_reducible_sys_new, \"reducible_sys\")\n h_red_up.Write()\n h_red_down.Write()\n\n h_irred_up, h_irred_down = create_sys_hist(h_nom_new, h_irreducible_sys_new, \"irreducible_sys\")\n h_irred_up.Write()\n h_irred_down.Write()\n\n h_iso_up, h_iso_down = create_sys_hist(h_nom_new, h_iso_sys, \"isolation_sys\")\n h_iso_up.Write()\n h_iso_down.Write()\n\n fin.Close()\n fout.Close()", "def run(filename=\"input.json\", path=\".\", **args):\n\n logger = logging.getLogger(__name__)\n \n #read input file (need to add command line specification)\n logger.info(\"Begin processing input file: %s\" % filename)\n eos_dict, thermo_dict, output_file = read_input.extract_calc_data(filename, path, **args)\n eos_dict['jit'] = args['jit']\n\n if output_file:\n file_dict = {\"output_file\":output_file}\n else:\n file_dict = {\"output_file\": \"despasito_out.txt\"}\n\n logger.debug(\"EOS dict:\", eos_dict)\n logger.debug(\"Thermo dict:\", thermo_dict)\n logger.info(\"Finish processing input file: {}\".format(filename))\n \n eos = eos_mod(**eos_dict)\n \n # Run either parametrization or thermodynamic calculation\n if \"opt_params\" in list(thermo_dict.keys()):\n logger.info(\"Initializing parametrization procedure\")\n output_dict = fit(eos, thermo_dict)\n #output = fit(eos, thermo_dict)\n logger.info(\"Finished parametrization\")\n write_output.writeout_fit_dict(output_dict,eos,**file_dict)\n else:\n logger.info(\"Initializing thermodynamic calculation\")\n output_dict = thermo(eos, thermo_dict)\n logger.info(\"Finished thermodynamic calculation\")\n write_output.writeout_thermo_dict(output_dict,thermo_dict[\"calculation_type\"],**file_dict)" ]
[ "0.51985294", "0.50600356", "0.50008357", "0.49936765", "0.49839947", "0.49248576", "0.49247414", "0.49126267", "0.48398393", "0.48395008", "0.48108664", "0.47869807", "0.47569895", "0.4744264", "0.47037157", "0.4688601", "0.46353415", "0.46312973", "0.4626369", "0.46131372", "0.45700085", "0.45672718", "0.45496738", "0.4536564", "0.45131868", "0.45089722", "0.4489431", "0.4483289", "0.44781137", "0.44769973", "0.4466658", "0.44649532", "0.44538534", "0.44298974", "0.4412921", "0.44076046", "0.440755", "0.43913084", "0.4387576", "0.43838364", "0.4383596", "0.4382418", "0.43727472", "0.43727472", "0.43645084", "0.43507543", "0.43401808", "0.43401292", "0.4339117", "0.4338982", "0.4334919", "0.4333715", "0.43275335", "0.43261197", "0.43259275", "0.43235606", "0.43136737", "0.43069398", "0.43026572", "0.43002772", "0.42953813", "0.42936644", "0.42847776", "0.4277469", "0.4275886", "0.42752773", "0.4270519", "0.42693955", "0.4268867", "0.42680377", "0.4267188", "0.42644227", "0.42636308", "0.42596194", "0.42508385", "0.42458782", "0.4243887", "0.42417216", "0.42367524", "0.42350447", "0.42307627", "0.42268273", "0.4226781", "0.42246976", "0.42157528", "0.4214676", "0.42080277", "0.42047888", "0.4204762", "0.41937786", "0.4184817", "0.41823697", "0.41806543", "0.41770038", "0.41751164", "0.41744587", "0.41680625", "0.4165889", "0.416574", "0.41644755" ]
0.49602553
5
TDE allows you to perform real-time I/O encryption and decryption on data files. Data is encrypted before it is written to disk and decrypted when it is read from disk into memory. For more information, see [Configure TDE](~~131048~~). > You cannot disable TDE after it is enabled.
async def modify_dbinstance_tdewith_options_async( self, request: dds_20151201_models.ModifyDBInstanceTDERequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.ModifyDBInstanceTDEResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.encryption_key): query['EncryptionKey'] = request.encryption_key if not UtilClient.is_unset(request.encryptor_name): query['EncryptorName'] = request.encryptor_name if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.role_arn): query['RoleARN'] = request.role_arn if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token if not UtilClient.is_unset(request.tdestatus): query['TDEStatus'] = request.tdestatus req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='ModifyDBInstanceTDE', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.ModifyDBInstanceTDEResponse(), await self.call_api_async(params, req, runtime) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n # file = None\n # for arg in sys.argv:\n # if \".txt\" in arg or \".py\" not in arg or \".log\" not in arg:\n # file = arg\n\n file = input(\"Enter a file: \")\n\n file_data = Cryptography()\n file_data.file = file\n\n crypt_type = input(\"Please enter 'E' to encrypt or 'D' to decrypt\\n>> \")\n file_data.crypt_type = crypt_type\n\n crypt_type = \"encrypt\" if crypt_type == 'E' else \"decrypt\"\n\n file_data.crypt_method = file_data.crypt_method\n\n key = input(\"Please enter a key for your data\\n>> \")\n file_data.key = key\n\n print(f\"crypt_method: {file_data.crypt_method}\")\n new_data = file_data.crypt_methods[file_data.crypt_method]()\n\n crypt_methods = defaultdict(str,\n {'C': \"Caesar\",\n 'M': \"Monoalphabetic\",\n 'P': \"Polyalphabetic\"})\n\n if DEBUG is False:\n crypt_method = crypt_methods[file_data.crypt_method]\n new_file_name = f\"{crypt_method}_{crypt_type.capitalize()}ed.txt\"\n logger.info(f\"{type(new_data)}: {new_data}\")\n Cryptography.write(new_file_name, new_data)\n print(f\"Your new {crypt_type}ed file has been created as \" +\n f\"{new_file_name}.\")", "def save_data(self):\n\n #\n # t=self.t[0:-1:self.R].reshape([self.t[0:-1:self.R].shape[0],1])\n\n def deterministic_data():\n t = self.dt * self.tau\n Ueem1 = self.Xeem[:, 0]\n Ueem2 = self.Xeem[:, 1]\n Ueem3 = self.Xeem[:, 2]\n Uem1 = self.Xem[:, 0]\n Uem2 = self.Xem[:, 1]\n Uem3 = self.Xem[:, 2]\n Ustk1 = self.Xstkm[:, 0]\n Ustk2 = self.Xstkm[:, 1]\n Ustk3 = self.Xstkm[:, 2]\n tagPar = np.array([\n 'k = ',\n 'r = ',\n 'T0 = ',\n 'N = ',\n 'R = ',\n 'T = ',\n 'dt = ',\n 'Dt = ',\n 'L = ',\n 'gamma = ',\n 'eta = ',\n 'lambda =',\n 'delta = ',\n 'beta = ',\n 'a = ',\n 'N0 = ',\n 'u = ',\n 'sigma1 = ',\n 'sigma2 = ',\n 'x01 = ',\n 'x02 = ',\n 'x03 = ',\n ])\n ParValues = np.array([\n self.k,\n self.r,\n self.T0,\n self.N,\n self.R,\n self.T,\n self.dt,\n self.Dt,\n self.L,\n self.gamma,\n self.eta,\n self.Lambda,\n self.delta,\n self.beta,\n self.a,\n self.NN,\n self.u,\n self.sigma1,\n self.sigma2,\n self.Xzero[0, 0],\n self.Xzero[0, 1],\n self.Xzero[0, 2]\n ])\n strPrefix = str(self.Dt)\n name1 = 'DetParameters' + strPrefix + '.txt'\n name2 = 'DetSolution' + strPrefix + '.txt'\n name3 = 'DetRefSolution' + str(self.dt) + '.txt'\n\n PARAMETERS = np.column_stack((tagPar, ParValues))\n np.savetxt(name1, PARAMETERS, delimiter=\" \", fmt=\"%s\")\n np.savetxt(name2,\n np.transpose(\n (\n t, Uem1, Uem2, Uem3, Ustk1, Ustk2, Ustk3,\n )\n ), fmt='%1.8f', delimiter='\\t')\n np.savetxt(name3,\n np.transpose(\n (\n self.t, Ueem1, Ueem2, Ueem3,\n )\n ), fmt='%1.8f', delimiter='\\t')\n\n def stochastic_data():\n \"\"\"\n t = self.dt * self.tau\n Ueem1 = self.Xeem[:, 0]\n Ueem2 = self.Xeem[:, 1]\n Ueem3 = self.Xeem[:, 2]\n Uem1 = self.Xem[:, 0]\n Uem2 = self.Xem[:, 1]\n Uem3 = self.Xem[:, 2]\n Ustk1 = self.Xstkm[:, 0]\n Ustk2 = self.Xstkm[:, 1]\n Ustk3 = self.Xstkm[:, 2]\n Utem1 = self.Xtem[:, 0]\n Utem2 = self.Xtem[:, 1]\n Utem3 = self.Xtem[:, 2]\n \"\"\"\n\n tagPar = np.array([\n 'k = ',\n 'r = ',\n 'T0 = ',\n 'N = ',\n 'R = ',\n 'T = ',\n 'dt = ',\n 'Dt = ',\n 'L = ',\n 'gamma = ',\n 'eta = ',\n 'lambda =',\n 'delta = ',\n 'beta = ',\n 'a = ',\n 'N0 = ',\n 'u = ',\n 'sigma1 = ',\n 'sigma2 = ',\n 'x01 = ',\n 'x02 = ',\n 'x03 = ',\n ])\n ParValues = np.array([\n self.k,\n self.r,\n self.T0,\n self.N,\n self.R,\n self.T,\n self.dt,\n self.Dt,\n self.L,\n self.gamma,\n self.eta,\n self.Lambda,\n self.delta,\n self.beta,\n self.a,\n self.NN,\n self.u,\n self.sigma1,\n self.sigma2,\n self.Xzero[0, 0],\n 
self.Xzero[0, 1],\n self.Xzero[0, 2]\n ])\n strPrefix = str(self.Dt)\n name1 = 'StoParameters' + strPrefix + '.txt'\n '''\n name2 = 'StoSolution' + strPrefix + '.txt'\n name3 = 'StoRefSolution' + str(self.dt) + '.txt'\n '''\n PARAMETERS = np.column_stack((tagPar, ParValues))\n np.savetxt(name1, PARAMETERS, delimiter=\" \", fmt=\"%s\")\n '''\n np.save(name2,\n np.transpose(\n (\n t, Uem1, Uem2, Uem3, Ustk1, Ustk2, Ustk3, Utem1,\n Utem2, Utem3\n )\n ))\n np.savetxt(name3,\n np.transpose(\n (\n self.t, Ueem1, Ueem2, Ueem3\n )\n ))\n if self.sigma1 == 0.0:\n if self.sigma2 == 0.0:\n DeterministicData()\n return\n StochasticData()\n '''\n\n return", "def setup_tdd(self,earfcn,bwMhz,powerdBm,ud_config,special_sf_config=0,ul_timing_advance=0,with_rx=False):\r\r\n\r\r\n self.setup_modem()\r\r\n self.instr.setup_4g_tx_test(cable_loss_dB=self.testConfig.cable_loss)\r\r\n self.teststep_idx = 0\r\r\n band,freq_ul,freq_dl = lte_util.get_lte_ul_dl_freq_band(earfcn)\r\r\n\r\r\n self.set_band(band=band)\r\r\n self.modemObj.set_rat_band(rat='LTE', band=band)\r\r\n duplex_mode = self.get_duplex_mode()\r\r\n assert(duplex_mode == \"TDD\")\r\r\n self.instr.lte_tx.set_duplex_mode(duplex_mode=duplex_mode)\r\r\n self.instr.lte_tx.set_band(band=band)\r\r\n self.modemObj.set_freqMHz(freqMHz=freq_ul)\r\r\n self.instr.lte_tx.set_rf_freqMHz(freqMHz=freq_ul)\r\r\n self.set_bw(bwMHz=bwMhz)\r\r\n rf_config = LTE_rf_config(bwMHz=bwMhz)\r\r\n self.modemObj.set_rb(direction='ul', num_rb=rf_config.num_rbs)\r\r\n self.modemObj.set_rb(direction='dl', num_rb=rf_config.num_rbs)\r\r\n self.modemObj.set_rb_start(rb_offset=rf_config.rb_offset)\r\r\n self.modemObj.set_rb_len(rb_len=rf_config.rb_len)\r\r\n rf_config.check_config()\r\r\n self.instr.lte_tx.set_channel_bw_MHz(bwMHz=bwMhz)\r\r\n self.modemObj.send_ul_pattern()\r\r\n\r\r\n self.set_ud_config(ud_config)\r\r\n self.modemObj.set_ud_config(ud_config)\r\r\n self.instr.lte_tx.set_ul_dl_conf(ud_config)\r\r\n\r\r\n self.modemObj.enable_tx()\r\r\n\r\r\n bursted = not (ud_config==\"TEST0\" or ud_config==\"TEST1\")\r\r\n self.setup_tdd_trigger(bursted,special_sf_config)\r\r\n\r\r\n self.modemObj.set_special_sf_config(special_sf_config)\r\r\n self.instr.lte_tx.set_special_subframe_conf(special_sf_config)\r\r\n\r\r\n self.modemObj.set_ul_timing_advance(ul_timing_advance)\r\r\n\r\r\n meas_sf = 2\r\r\n self.set_meas_sf(meas_sf)\r\r\n self.instr.lte_tx.conf_measurement_subframe(measSubframe=meas_sf)\r\r\n\r\r\n self.modemObj.set_txagc_dbm(value=powerdBm)\r\r\n self.instr.lte_tx.set_rf_exp_power(power_dBm=powerdBm+5)\r\r\n self.instr.waitForCompletion()\r\r\n\r\r\n if with_rx:\r\r\n assert(bursted)\r\r\n self.modemObj.set_freqMHz(direction='rx',freqMHz=freq_dl)\r\r\n self.modemObj.set_rxagc_auto(ant='m')\r\r\n self.modemObj.enable_rx(ant='m')\r\r\n\r\r\n self.set_test_afc_val()\r\r\n\r\r\n return bursted", "def main():\n\n # performs crib dragging using initial values\n plaintext1, plaintext2 = crib_drag('', '', 0, 0)\n\n if plaintext1 is None or plaintext2 is None:\n print('No possible English decryption using the current dictionary')\n return\n\n # find the key and creates file with results\n plaintext1 = plaintext1[:CIPHER_LEN]\n plaintext2 = plaintext2[:CIPHER_LEN]\n key = find_key(plaintext1, plaintext2)\n\n with open('plaintext1.txt', 'w') as plain_file:\n plain_file.write(plaintext1)\n with open('plaintext2.txt', 'w') as plain_file:\n plain_file.write(plaintext2)\n with open('key.txt', 'wb') as plain_file:\n plain_file.write(key)", "def TestTDDFT():\n prm = '''\n Model\tTDHF\n 
Method\tMMUT\n dt\t0.02\n MaxIter\t100\n ExDir\t1.0\n EyDir\t1.0\n EzDir\t1.0\n FieldAmplitude\t0.01\n FieldFreq\t0.9202\n ApplyImpulse\t1\n ApplyCw\t\t0\n StatusEvery\t10\n '''\n geom = \"\"\"\n H 0. 0. 0.\n H 0. 0. 0.9\n H 2.0 0. 0\n H 2.0 0.9 0\n \"\"\"\n output = re.sub(\"py\",\"dat\",sys.argv[0])\n mol = gto.Mole()\n mol.atom = geom\n mol.basis = 'sto-3g'\n mol.build()\n the_scf = pyscf.dft.RKS(mol)\n the_scf.xc='HF'\n print \"Inital SCF finished. E=\", the_scf.kernel()\n aprop = tdscf.tdscf(the_scf,prm,output)\n return", "def modify_dbinstance_tdewith_options(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.encryption_key):\n query['EncryptionKey'] = request.encryption_key\n if not UtilClient.is_unset(request.encryptor_name):\n query['EncryptorName'] = request.encryptor_name\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_arn):\n query['RoleARN'] = request.role_arn\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.tdestatus):\n query['TDEStatus'] = request.tdestatus\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ModifyDBInstanceTDE',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ModifyDBInstanceTDEResponse(),\n self.call_api(params, req, runtime)\n )", "def test_single_dft():\n test_file = os.path.join(DATA_DIR, 'test39_dft.out')\n parser = CRYSTOUT(test_file)\n info = parser.info\n assert info['finished'] == 2 # finished without errors\n assert info['energy'] == -4.8538264773648E+02 * Ha # energy in eV\n assert info['k'] == '6x6x6' # Monkhorst-Pack net\n assert info['H'] == \"LDA/PZ_LDA\"\n assert info['ncycles'][0] == 9\n assert info['electrons']['basis_set']['ecp']['Ge'][0][1] == (0.82751, -1.26859, -1)\n assert info['electrons']['basis_set']['bs']['Ge'][0][1] == (1.834, 0.4939, 0.006414)", "def enable_dual_TTS(self):\n\t\t\n\t\tself.isActiveDualTTS = True\n\t\tself.change_TTS_engine()", "def _disable_encryption(self):\n # () -> None\n self.encrypt = self._disabled_encrypt\n self.decrypt = self._disabled_decrypt", "def tdd():\n\n with lcd(FRONTENDDIR):\n cmd = '%(gulp)s tdd' % {'gulp': get_gulp()}\n local(cmd)", "def crypto_test(tdesc, tpm):\n node_name = tdesc.get('name')\n key = get_attribute(tdesc, 'key')\n if len(key) not in (16, 24, 32):\n raise subcmd.TpmTestError('wrong key size \"%s:%s\"' % (\n node_name,\n ''.join('%2.2x' % ord(x) for x in key)))\n iv = get_attribute(tdesc, 'iv', required=False)\n if iv and len(iv) != 16:\n raise subcmd.TpmTestError('wrong iv size \"%s:%s\"' % (\n node_name,\n ''.join('%2.2x' % ord(x) for x in iv)))\n 
clear_text = get_attribute(tdesc, 'clear_text')\n if tpm.debug_enabled():\n print('clear text size', len(clear_text))\n cipher_text = get_attribute(tdesc, 'cipher_text', required=False)\n real_cipher_text = crypto_run(node_name, ENCRYPT, key, iv,\n clear_text, cipher_text, tpm)\n crypto_run(node_name, DECRYPT, key, iv, real_cipher_text,\n clear_text, tpm)\n print(utils.cursor_back() + 'SUCCESS: %s' % node_name)", "def disable_dual_TTS(self):\n\t\t\n\t\tself.isActiveDualTTS = False\n\t\tself.change_TTS_engine()", "def main(ctx, access_key, host, debug):\n info = {\n \"access_key\": access_key,\n \"host\": host,\n \"DEBUG\": debug\n }\n\n _tda = None\n\n if access_key == \"\":\n configFile = _config_filepath()\n if os.path.exists(configFile):\n with open(configFile, \"r\", encoding=\"utf-8\") as cf:\n if cf.read() != \"\":\n info = _getConf()\n info[\"DEBUG\"] = debug\n\n if info[\"access_key\"] != \"\":\n _tda = TDA(info[\"access_key\"], info[\"host\"])\n if info[\"DEBUG\"]:\n _tda.Debug()\n\n ctx.obj = _tda", "def _get_sql_db_tde_disabled_event(com, ext):\n friendly_cloud_type = util.friendly_string(com.get('cloud_type'))\n reference = com.get('reference')\n description = (\n '{} SQL DB {} has TDE disabled.'\n .format(friendly_cloud_type, reference)\n )\n recommendation = (\n 'Check {} SQL DB {} and enable TDE.'\n .format(friendly_cloud_type, reference)\n )\n event_record = {\n # Preserve the extended properties from the virtual\n # machine record because they provide useful context to\n # locate the virtual machine that led to the event.\n 'ext': util.merge_dicts(ext, {\n 'record_type': 'sql_db_tde_event'\n }),\n 'com': {\n 'cloud_type': com.get('cloud_type'),\n 'record_type': 'sql_db_tde_event',\n 'reference': reference,\n 'description': description,\n 'recommendation': recommendation,\n }\n }\n\n _log.info('Generating sql_db_tde_event; %r', event_record)\n yield event_record", "def encrypt():\n print(\"Use sops to encrypt the file.\")\n print(\"Learn more at https://github.com/mozilla/sops\")", "def test_tte4(self):\n filename = str(self.temp_j2k_filename)\n xtx4_setup(filename)\n self.assertTrue(True)", "def test_endecrypt():\n\n e, d, c = keygen()\n\n test_encryption(e, c)\n test_decryption(d, c)\n key_cracker(e, c)", "def test_pert_file(self):\n path, case = os.path.split(self.ieee14)\n\n # --- with pert file ---\n ss = andes.run('ieee14.raw', pert='pert.py',\n input_path=path, no_output=True, default_config=True,\n )\n ss.TDS.init()\n self.assertIsNotNone(ss.TDS.callpert)\n\n # --- without pert file ---\n ss = andes.run('ieee14.raw',\n input_path=path, no_output=True, default_config=True,\n )\n ss.TDS.init()\n self.assertIsNone(ss.TDS.callpert)", "def decrypt_text_file(self):\r\n\t\t#Ensures that the file has something that can be decrypted.\r\n\t\tfile_contains_message = True\r\n\t\twhile file_contains_message:\r\n\t\t\tfile_exists = True\r\n\t\t\t#Checks to see if the file exists.\r\n\t\t\twhile file_exists:\r\n\t\t\t\tself.text_file_name = input(\"Please enter the name of the text file you wish to decrypt in format |file_name.txt|.--> \")\r\n\t\t\t\tif \".txt\" in self.text_file_name:\r\n\t\t\t\t\tfile_exists = Doc_Control().check_for_file(self.text_file_name)\r\n\t\t\t\telse: \r\n\t\t\t\t\tcontinue\r\n\t\t\t#Decrypts message but verifys correct key before giving user their decrypted message.\r\n\t\t\twhile True: \r\n\t\t\t\tself.message = Doc_Control().open_file(self.text_file_name)\r\n\t\t\t\tif self.message != \"\" and len(self.message) > 4:\r\n\t\t\t\t\tfile_contains_message 
= False\r\n\t\t\t\t\tbreak\r\n\t\t\t\telse:\r\n\t\t\t\t\tprint(\"Your file does not contain an encryptable message.\")\r\n\t\t\t\t\tbreak\r\n\t\tself.right_key = True\r\n\t\twhile self.right_key:\r\n\t\t\tself.setup_key_decrypt()\r\n\t\t\tself.my_code = Decryptor(self.message, self.key).transfer_decrypt()\r\n\t\t\tself.verify_decrypt_key()\r\n\t\tself.output_file = Doc_Control().assign_output_file()\r\n\t\toutput_file_obj = open(self.output_file, 'w')\r\n\t\toutput_file_obj.write(self.my_code)\r\n\t\toutput_file_obj.close()\t\t\r\n\t\tprint(\"\\nYour file has been decrypted.\")", "def decrypt(self,timerPrinting=False):\n\n\t\tt = time.time() \n\t\tif self.extension in self.path:\n\t\t\twith open(self.path,'rb') as infile:\n\t\t\t\tfile_data = infile.read()\n\t\t\t# #Start Checking the Platform\n\t\t\t# if platform.system() == 'Windows':\n\t\t\t# \tself.path = self.path.split('\\\\')[-1]\n\t\t\t# elif platform.system() == 'Linux':\n\t\t\t# \tself.path = self.path.split('/')[-1]\n\t\t\t# # END Checking\n\t\t\t# print('Decryption of '+ self.path +\"...\")\n\t\t\t######################### Blowfish Decryption Algorithm ###############\n\t\t\tbs = Blowfish.block_size\n\t\t\trealData = base64.b64decode(file_data)[8:]\n\t\t\tiv = base64.b64decode(file_data)[:8]\n\t\t\tdecrypt = Blowfish.new(self.key, Blowfish.MODE_CBC, iv)\n\t\t\tself.decrypt = decrypt.decrypt(realData)\n\t\t\t########################### End Blowfish #########################\n\t\t\t#print('Writing in your file...')\n\t\t\tself.out = self.path.replace(self.extension,'')\n\t\t\tos.remove(self.path)\n\t\t\twith open(self.out,'wb') as outfile:\n\t\t\t\toutfile.write(self.decrypt)\n\t\t\tif timerPrinting:\n\t\t\t\tprint(\"Done in \",time.time() - t)\n\t\t\t\n\t\telse:\n\t\t\tprint('The File is Not Encrypted To Decrypted.')", "def get_TTS_data(self, exchanged_token, exchange=False):\n if os.path.exists(self.lock_file):\n ctime = os.stat(self.lock_file).st_ctime\n age = time.time() - ctime\n if age < self.age:\n self.log.error(\"Update already in progres. 
Sleeping ..\")\n time.sleep(self.age - age)\n else:\n self.log.error(\"Stale lock file, removing ...\")\n os.remove(self.lock_file)\n open(self.lock_file, 'w+').close()\n\n if exchange:\n with file('/tmp/refresh_token') as f:\n refresh_token = f.read()\n self.exchanged_token = self.refresh_token(self.client_id, self.client_secret, refresh_token.strip())\n if isinstance(self.exchanged_token, int):\n self.log.error(\"refresh_token error\")\n\n if self.get_certificate(self.credential_endpoint):\n # load json and prepare objects\n with open('/tmp/output.json') as tts_data_file:\n tts_data = json.load(tts_data_file)\n \n f = open(self.user_cert, 'w+')\n f.write(str(tts_data['credential']['entries'][0]['value']))\n f.close()\n \n f = open(self.user_key, 'w+')\n f.write(str(tts_data['credential']['entries'][1]['value']))\n f.close()\n \n f = open(self.user_passwd, 'w+')\n f.write(str(tts_data['credential']['entries'][2]['value']))\n f.close()\n \n try:\n os.chmod(self.user_key, 0600)\n except OSError, e:\n self.log.error(e)\n self.log.error(\"Permission denied to chmod passwd file\")\n return False\n \n os.remove(self.lock_file)\n \n return True\n else:\n return False", "def read_taiwan_ntu_dsi():\n dipy_home = pjoin(os.path.expanduser('~'), '.dipy')\n folder = pjoin(dipy_home, 'taiwan_ntu_dsi')\n fraw = pjoin(folder, 'DSI203.nii.gz')\n fbval = pjoin(folder, 'DSI203.bval')\n fbvec = pjoin(folder, 'DSI203.bvec')\n md5_dict = {'data': '950408c0980a7154cb188666a885a91f',\n 'bval': '602e5cb5fad2e7163e8025011d8a6755',\n 'bvec': 'a95eb1be44748c20214dc7aa654f9e6b',\n 'license': '7fa1d5e272533e832cc7453eeba23f44'}\n\n check_md5(fraw, md5_dict['data'])\n check_md5(fbval, md5_dict['bval'])\n check_md5(fbvec, md5_dict['bvec'])\n check_md5(pjoin(folder, 'DSI203_license.txt'), md5_dict['license'])\n\n bvals, bvecs = read_bvals_bvecs(fbval, fbvec)\n bvecs[1:] = bvecs[1:] / np.sqrt(np.sum(bvecs[1:] * bvecs[1:], axis=1))[:, None]\n\n gtab = gradient_table(bvals, bvecs)\n img = nib.load(fraw)\n return img, gtab", "def decrypt(data):\n # Decrypt data if necessary\n result = None\n if str(data[:5]) == \"<?xml\":\n print(\" - Unprotected CETRAINER detected\")\n result = data\n else:\n print(\" - Protected CETRAINER detected. 
Decrypting...\")\n ckey = 0xCE\n for i in range(2, len(data)):\n data[i] = data[i] ^ data[i-2]\n for i in range(len(data)-2, -1, -1):\n data[i] = data[i] ^ data[i+1]\n for i in range(0, len(data)):\n data[i] = data[i] ^ ckey\n ckey = (ckey + 1) & 0xFF\n\n # Decompress if necessary and write data\n if data[:5] == b'CHEAT':\n result = zlib.decompress(data[5:], -15)\n result = result[4:]\n print(\" - Decompressed CETRAINER using new method\")\n else:\n result = zlib.decompress(data, -15)\n print(\" - Decompressed CETRAINER using old method\")\n return result", "def decrypt(path, key, default, output, url, token, vaultpath):\n if not key:\n key = getpass('Encryption key: ')\n\n path, file_type, file_mtime = get_file_type_and_mtime(path)\n data = get_config(path, file_type, default=False)\n data = decrypt_credentials(data, key)\n\n # Only merge the DEFAULT section after decrypting.\n if default:\n data = merge_default(data)\n\n if url:\n try:\n import hvac\n except:\n print('''\nTo use Hashicorp's Vault you must install the hvac package.\nTo install it try using the following command:\n\n pip install hvac\n''')\n exit(3)\n\n if not token:\n token = os.environ.get('VAULT_TOKEN', '')\n if not token:\n token = getpass('Vault token: ')\n \n client = hvac.Client(url=url, token=token)\n if not vaultpath:\n vaultpath = path\n\n if vaultpath[0] == '~':\n vaultpath = vaultpath[1:]\n if vaultpath[0] == '.':\n vaultpath = vaultpath[1:]\n if vaultpath[0] == '.':\n vaultpath = vaultpath[1:]\n if vaultpath[0] == '/':\n vaultpath = vaultpath[1:]\n\n data = merge_default(data)\n for heading in data:\n # kargs = { heading: json.dumps(data[heading]) }\n client.write(vaultpath + '/' + heading, **data[heading])\n\n else:\n\n if output:\n if output[0] == '.':\n output = output[1:]\n file_type = '.' 
+ output.lower()\n\n with open(path + file_type, 'w') as save_file:\n if file_type == '.json':\n json.dump(data, save_file, indent=2)\n\n elif file_type in {'.ini', '.conf'}:\n if default:\n default_section = 'DEFAULT'\n else:\n default_section = 'DEFAULT' + os.urandom(16).hex()\n config_ini = configparser.ConfigParser(\n dict_type=OrderedDict,\n default_section=default_section,\n interpolation=None)\n for heading in data:\n config_ini.add_section(heading)\n for item in data[heading]:\n config_ini.set(heading, item, data[heading][item])\n config_ini.write(save_file)\n\n else:\n write_yaml(save_file, data)", "def showTF(tf,outDir):\n\n nlo2lo,data2lo,data2nlo,data2lo_A,data2nlo_A=tf\n\n c=ROOT.TCanvas('c','c',500,500)\n c.SetBottomMargin(0)\n c.SetTopMargin(0)\n c.SetLeftMargin(0)\n c.SetRightMargin(0)\n c.cd()\n\n p1=ROOT.TPad('p1','p1',0,0.5,1,1.0)\n p1.Draw()\n p1.SetRightMargin(0.03)\n p1.SetLeftMargin(0.12)\n p1.SetTopMargin(0.1)\n p1.SetBottomMargin(0.01)\n p1.SetGridy()\n p1.cd()\n nlo2lo.Draw('e2')\n nlo2lo.GetYaxis().SetTitle('Z ratio')\n nlo2lo.GetYaxis().SetNdivisions(5)\n nlo2lo.GetXaxis().SetTitleSize(0)\n nlo2lo.GetXaxis().SetLabelSize(0)\n nlo2lo.GetYaxis().SetTitleSize(0.08)\n nlo2lo.GetYaxis().SetTitleOffset(0.8)\n nlo2lo.GetYaxis().SetLabelSize(0.08)\n nlo2lo.GetYaxis().SetRangeUser(0.01,1.94)\n data2lo.Draw('e1same')\n data2nlo.Draw('e1same')\n\n leg1=p1.BuildLegend(0.7,0.88,0.95,0.66)\n leg1.SetFillStyle(0)\n leg1.SetBorderSize(0)\n leg1.SetTextFont(42)\n leg1.SetTextSize(0.06)\n\n l1=ROOT.TLine()\n l1.SetLineWidth(2)\n l1.SetLineColor(ROOT.kBlue)\n l1.DrawLine(data2lo.GetXaxis().GetXmin(),1,data2lo.GetXaxis().GetXmax(),1)\n\n txt=ROOT.TLatex()\n txt.SetNDC(True)\n txt.SetTextFont(42)\n txt.SetTextSize(0.08)\n txt.SetTextAlign(12)\n txt.DrawLatex(0.12,0.95,'#bf{CMS} #it{preliminary}')\n p1.RedrawAxis()\n\n c.cd()\n p2=ROOT.TPad('p2','p2',0,0,1,0.5)\n p2.SetRightMargin(0.03)\n p2.SetLeftMargin(0.12)\n p2.SetTopMargin(0.01)\n p2.SetBottomMargin(0.18)\n p2.SetGridy()\n p2.Draw()\n p2.cd()\n data2lo_A.Draw('e1')\n data2lo_A.GetYaxis().SetTitle('#gamma ratio')\n data2lo_A.GetYaxis().SetNdivisions(5)\n data2lo_A.GetYaxis().SetRangeUser(0.01,1.94)\n data2lo_A.GetXaxis().SetTitleSize(0.08)\n data2lo_A.GetXaxis().SetLabelSize(0.08)\n data2lo_A.GetYaxis().SetTitleSize(0.08)\n data2lo_A.GetYaxis().SetLabelSize(0.08)\n data2lo_A.GetYaxis().SetTitleOffset(0.8)\n data2nlo_A.Draw('e1same')\n \n leg2=p2.BuildLegend(0.7,0.94,0.95,0.80)\n leg2.SetFillStyle(0)\n leg2.SetBorderSize(0)\n leg2.SetTextFont(42)\n leg2.SetTextSize(0.06)\n \n l2=ROOT.TLine()\n l2.SetLineColor(ROOT.kBlue)\n l2.SetLineWidth(2)\n l2.DrawLine(data2lo_A.GetXaxis().GetXmin(),1,data2lo_A.GetXaxis().GetXmax(),1)\n\n p2.RedrawAxis()\n\n c.cd()\n c.Modified()\n c.Update()\n for ext in ['png','pdf']:\n c.SaveAs('{0}.{1}'.format(outDir,ext))", "def enable_tee(self):\n self._tee = True", "def test_simulation_persistence(compression, tmp_path):\n path = tmp_path / \"test_simulation_persistence.hdf5\"\n storage = FileStorage(path, compression=compression)\n\n # write some simulation data\n pde = DiffusionPDE()\n grid = UnitGrid([16, 16]) # generate grid\n state = ScalarField.random_uniform(grid, 0.2, 0.3)\n pde.solve(state, t_range=0.11, dt=0.001, tracker=storage.tracker(interval=0.05))\n storage.close()\n\n # read the data\n storage = FileStorage(path)\n np.testing.assert_almost_equal(storage.times, [0, 0.05, 0.1])\n data = np.array(storage.data)\n assert data.shape == (3,) + state.data.shape\n grid_res = storage.grid\n assert 
grid == grid_res\n grid_res = storage.grid\n assert grid == grid_res", "def louder():\n try:\n ttsEng.louder()\n except Exception, e:\n logging.error(e)", "async def modify_dbinstance_tde_async(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n runtime = util_models.RuntimeOptions()\n return await self.modify_dbinstance_tdewith_options_async(request, runtime)", "def write_tde(table_df, tde_fullpath, arg_append):\n if arg_append and not os.path.isfile(tde_fullpath):\n print \"Couldn't append -- file doesn't exist\"\n arg_append = False\n\n # Remove it if already exists\n if not arg_append and os.path.exists(tde_fullpath):\n os.remove(tde_fullpath)\n tdefile = tde.Extract(tde_fullpath)\n\n # define the table definition\n table_def = tde.TableDefinition()\n \n # create a list of column names\n colnames = table_df.columns\n # create a list of column types\n coltypes = table_df.dtypes\n\n # for each column, add the appropriate info the Table Definition\n for col_idx in range(0, len(colnames)):\n cname = colnames[col_idx]\n ctype = fieldMap[str(coltypes[col_idx])]\n table_def.addColumn(cname, ctype) \n\n # create the extract from the Table Definition\n if arg_append:\n tde_table = tdefile.openTable('Extract')\n else:\n tde_table = tdefile.addTable('Extract', table_def)\n row = tde.Row(table_def)\n\n for r in range(0, table_df.shape[0]):\n for c in range(0, len(coltypes)):\n if str(coltypes[c]) == 'float64':\n row.setDouble(c, table_df.iloc[r,c])\n elif str(coltypes[c]) == 'float32':\n row.setDouble(c, table_df.iloc[r,c])\n elif str(coltypes[c]) == 'int64':\n row.setDouble(c, table_df.iloc[r,c]) \n elif str(coltypes[c]) == 'int32':\n row.setDouble(c, table_df.iloc[r,c])\n elif str(coltypes[c]) == 'object':\n row.setString(c, table_df.iloc[r,c]) \n elif str(coltypes[c]) == 'bool':\n row.setBoolean(c, table_df.iloc[r,c])\n else:\n row.setNull(c)\n # insert the row\n tde_table.insert(row)\n\n tdefile.close()\n print \"Wrote %d lines to %s\" % (len(table_df), tde_fullpath)", "def decrypt(self, in_, out):\n try:\n # Bytes read from in will be decrypted\n \n out.write(pyDes.des.decrypt(in_.read()))\n # Read in the decrypted bytes and write the cleartext to out\n out.close()\n except Exception as e:\n print e\n pass", "def tesselate(options):\n if not options.freplace:\n if len(options.args) != 2:\n raise TelemacException(\\\n '\\nThe code \"tessellate\" here '\n 'requires one i2s/i3s file and '\n 'one output slf file\\n')\n i3s_file = options.args[0]\n out_file = options.args[1]\n else:\n if len(options.args) != 1:\n raise TelemacException(\\\n '\\nThe code \"tessellate\" here '\n 'requires one i2s/i3s file\\n')\n i3s_file = options.args[0]\n head, _ = path.splitext(i3s_file)\n out_file = head+'.slf'\n\n i3s_file = path.realpath(i3s_file)\n if not path.exists(i3s_file):\n raise TelemacException(\\\n '\\nCould not find '\n 'the file named: {}'.format(i3s_file))\n\n print('\\n\\nTessellating ' + path.basename(i3s_file) + ' within ' + \\\n path.dirname(i3s_file) + '\\n'+'~'*72+'\\n')\n i2s = InS(i3s_file)\n ikle2, ipob2, meshx, meshy = tessellate_poly(i2s, debug=True)\n\n print('\\n\\nWriting down the Selafin file ' + \\\n path.basename(out_file) + '\\n'+'~'*72+'\\n')\n slf = Selafin('')\n slf.title = ''\n slf.nplan = 1\n slf.ndp2 = 3\n slf.ndp3 = 3\n slf.nbv1 = 1\n slf.nvar = 1\n slf.varindex = 1\n slf.varnames = ['BOTTOM ']\n slf.varunits = ['M ']\n slf.ikle2 = ikle2\n slf.ikle3 = slf.ikle2\n slf.meshx = meshx\n slf.meshy = 
meshy\n slf.npoin2 = i2s.npoin\n slf.npoin3 = slf.npoin2\n slf.nelem2 = len(slf.ikle2)/slf.ndp3\n slf.nelem3 = slf.nelem2\n slf.iparam = [0, 0, 0, 0, 0, 0, 1, 0, 0, 0]\n slf.ipob2 = ipob2\n slf.ipob3 = slf.ipob2\n slf.fole = {'hook':open(out_file, 'wb'), 'endian':\">\",\n 'float':('f', 4), 'name':out_file}\n slf.tags['times'] = [1]\n if options.sph2ll != None:\n radius = 6371000.\n long0, lat0 = options.sph2ll.split(\":\")\n long0 = np.deg2rad(float(long0))\n lat0 = np.deg2rad(float(lat0))\n const = np.tan(lat0/2. + np.pi/4.)\n slf.meshx = np.rad2deg(slf.meshx/radius + long0)\n slf.meshy = np.rad2deg(2.*np.arctan(const*np.exp(slf.meshy/radius)) \\\n - np.pi/2.)\n if options.ll2sph != None:\n radius = 6371000.\n long0, lat0 = options.ll2sph.split(\":\")\n long0 = np.deg2rad(float(long0))\n lat0 = np.deg2rad(float(lat0))\n slf.meshx = radius * (np.deg2rad(slf.meshx) - long0)\n slf.meshy = radius * \\\n (np.log(np.tan(np.deg2rad(slf.meshy)/2. + np.pi/4.)) \\\n - np.log(np.tan(lat0/2. + np.pi/4.)))\n if options.ll2utm != None:\n zone = int(options.ll2utm)\n slf.meshx, slf.meshy, zone = utm.from_lat_long(slf.meshx, slf.meshy,\n zone)\n if options.utm2ll != None:\n zone = int(options.utm2ll)\n slf.meshx, slf.meshy = utm.to_lat_long(slf.meshx, slf.meshy, zone)\n slf.append_header_slf()\n slf.append_core_time_slf(0)\n slf.append_core_vars_slf([np.zeros(slf.npoin2)])\n slf.fole['hook'].close()", "def decrypt(self, data):", "def createTripleDES(key, IV, implList=None):\r\n if implList == None:\r\n implList = [\"openssl\", \"pycrypto\"]\r\n\r\n for impl in implList:\r\n if impl == \"openssl\" and cryptomath.m2cryptoLoaded:\r\n return openssl_tripledes.new(key, 2, IV)\r\n elif impl == \"pycrypto\" and cryptomath.pycryptoLoaded:\r\n return pycrypto_tripledes.new(key, 2, IV)\r\n raise NotImplementedError()", "def _disabled_decrypt(self, *args, **kwargs):\n raise NotImplementedError('\"decrypt\" is not supported by the \"{}\" algorithm'.format(self.java_name))", "def test(DATASET=\"Texas\", CONFIG=None):\n if CONFIG is None:\n CONFIG = get_config_kACE(DATASET)\n print(f\"Loading {DATASET} data\")\n x_im, y_im, EVALUATE, (C_X, C_Y) = datasets.fetch(DATASET, **CONFIG)\n if tf.config.list_physical_devices(\"GPU\") and not CONFIG[\"debug\"]:\n C_CODE = 3\n print(\"here\")\n TRANSLATION_SPEC = {\n \"enc_X\": {\"input_chs\": C_X, \"filter_spec\": [50, 50, C_CODE]},\n \"enc_Y\": {\"input_chs\": C_Y, \"filter_spec\": [50, 50, C_CODE]},\n \"dec_X\": {\"input_chs\": C_CODE, \"filter_spec\": [50, 50, C_X]},\n \"dec_Y\": {\"input_chs\": C_CODE, \"filter_spec\": [50, 50, C_Y]},\n }\n else:\n print(\"why here?\")\n C_CODE = 1\n TRANSLATION_SPEC = {\n \"enc_X\": {\"input_chs\": C_X, \"filter_spec\": [C_CODE]},\n \"enc_Y\": {\"input_chs\": C_Y, \"filter_spec\": [C_CODE]},\n \"dec_X\": {\"input_chs\": C_CODE, \"filter_spec\": [C_X]},\n \"dec_Y\": {\"input_chs\": C_CODE, \"filter_spec\": [C_Y]},\n }\n print(\"Change Detector Init\")\n cd = Kern_AceNet(TRANSLATION_SPEC, **CONFIG)\n print(\"Training\")\n training_time = 0\n cross_loss_weight = tf.expand_dims(tf.zeros(x_im.shape[:-1], dtype=tf.float32), -1)\n for epochs in CONFIG[\"list_epochs\"]:\n CONFIG.update(epochs=epochs)\n tr_gen, dtypes, shapes = datasets._training_data_generator(\n x_im[0], y_im[0], cross_loss_weight[0], CONFIG[\"patch_size\"]\n )\n TRAIN = tf.data.Dataset.from_generator(tr_gen, dtypes, shapes)\n TRAIN = TRAIN.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\n tr_time, _ = cd.train(TRAIN, evaluation_dataset=EVALUATE, **CONFIG)\n for x, y, _ in 
EVALUATE.batch(1):\n alpha = cd([x, y])\n cross_loss_weight = 1.0 - alpha\n training_time += tr_time\n\n cd.load_all_weights(cd.log_path)\n cd.final_evaluate(EVALUATE, **CONFIG)\n metrics = {}\n for key in list(cd.difference_img_metrics.keys()) + list(\n cd.change_map_metrics.keys()\n ):\n metrics[key] = cd.metrics_history[key][-1]\n metrics[\"F1\"] = metrics[\"TP\"] / (\n metrics[\"TP\"] + 0.5 * (metrics[\"FP\"] + metrics[\"FN\"])\n )\n timestamp = cd.timestamp\n epoch = cd.epoch.numpy()\n speed = (epoch, training_time, timestamp)\n del cd\n gc.collect()\n return metrics, speed", "def crypto_run(node_name, op_type, key, iv, in_text, out_text, tpm):\n mode_name, submode_name = node_name.split(':')\n submode_name = submode_name[:3].upper()\n\n mode = SUPPORTED_MODES.get(mode_name.upper())\n if not mode:\n raise subcmd.TpmTestError('unrecognizable mode in node \"%s\"' % node_name)\n\n submode = mode.submodes.get(submode_name, 0)\n cmd = '%c' % op_type # Encrypt or decrypt\n cmd += '%c' % submode # A particular type of a generic algorithm.\n cmd += '%c' % len(key)\n cmd += key\n cmd += '%c' % len(iv)\n if iv:\n cmd += iv\n cmd += struct.pack('>H', len(in_text))\n cmd += in_text\n if tpm.debug_enabled():\n print('%d:%d cmd size' % (op_type, mode.subcmd),\n len(cmd), utils.hex_dump(cmd))\n wrapped_response = tpm.command(tpm.wrap_ext_command(mode.subcmd, cmd))\n real_out_text = tpm.unwrap_ext_response(mode.subcmd, wrapped_response)\n if out_text:\n if len(real_out_text) > len(out_text):\n real_out_text = real_out_text[:len(out_text)] # Ignore padding\n if real_out_text != out_text:\n if tpm.debug_enabled():\n print('Out text mismatch in node %s:\\n' % node_name)\n else:\n raise subcmd.TpmTestError(\n 'Out text mismatch in node %s, operation %d:\\n'\n 'In text:%sExpected out text:%sReal out text:%s' % (\n node_name, op_type,\n utils.hex_dump(in_text),\n utils.hex_dump(out_text),\n utils.hex_dump(real_out_text)))\n return real_out_text", "def modify_dbinstance_tde(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n runtime = util_models.RuntimeOptions()\n return self.modify_dbinstance_tdewith_options(request, runtime)", "def enableDestruction(self):\n self.destructable = True", "def set_dft(self, value):\n self.dft = value", "def setup_UT_te(self):\n self.setup_O()\n self.setup_T()\n # diagonalizing T\n ET, LT, RT = eig(self.T, b=self.O, left=True, right=True)\n LT = LT.transpose().conjugate()\n exp_T = np.exp(-1j*ET / self.hbar)\n # order according to absolute value:\n i_sort = np.argsort(-abs(exp_T))\n exp_T = exp_T[i_sort]\n RT = RT[:,i_sort]\n LT = LT[i_sort,:]\n # normalize RL to O and test the decomposition\n RT, LT = self.normalize_RL_to_O(RT, LT)\n # test the quality of the decomposition -------------------------\n # we exclude directions of evals below 10**(-15) by hand\n max_mode = len(np.where(abs(exp_T)>10**(-15))[0])\n ET_red = ET[:max_mode]\n RT_red = RT[:,:max_mode]\n LT_red = LT[:max_mode,:]\n # 1) test of orthogonality on the reduced space\n unity = np.dot(LT_red, np.dot(self.O, RT_red))\n ortho_error = abs(unity - np.diag(np.ones(max_mode))).max()\n print(\"Orthogonality errors\", ortho_error)\n # 1) test difference between the full and the reduced te-operator\n UT_red = np.dot(RT_red, np.dot(np.diag(exp_T[:max_mode]),\n np.dot(LT_red, self.O)))\n UT = np.dot(RT, np.dot(np.diag(exp_T), np.dot(LT, self.O)))\n print(\"Propagator error\", abs(UT_red - UT).max())\n self.UT = UT", "def SPIjedec(self):\n 
data=[0x9f, 0, 0, 0];\n data=self.SPItrans(data);\n jedec=0;\n self.JEDECmanufacturer=ord(data[1]);\n if self.JEDECmanufacturer==0xFF:\n self.JEDECtype=0x20;\n self.JEDECcapacity=0x14;\n jedec=0x202014;\n else:\n self.JEDECtype=ord(data[2]);\n self.JEDECcapacity=ord(data[3]);\n jedec=(ord(data[1])<<16)+(ord(data[2])<<8)+ord(data[3]);\n self.JEDECsize=self.JEDECsizes.get(self.JEDECcapacity);\n if self.JEDECsize==None:\n self.JEDECsize=0;\n \n if jedec==0x1F4501:\n self.JEDECsize=1024**2;\n self.JEDECdevice=jedec;\n return data;", "def testRead(self):\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_TSK, location='/passwords.txt',\n inode=self._IDENTIFIER_PASSWORDS_TXT, parent=self._bde_path_spec)\n file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)\n\n self._TestRead(file_object)", "def testRead(self):\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_TSK, location='/passwords.txt',\n inode=self._IDENTIFIER_PASSWORDS_TXT, parent=self._bde_path_spec)\n file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)\n\n self._TestRead(file_object)", "def _phi_ode(self, t, z):\n # prep states and phi_matrix\n state = z[0:len(self.istate)]\n ad_state = make_ad(state)\n phi_flat = clear_ad(z[len(self.istate):])\n phi = np.reshape(phi_flat, (len(self.istate),\n len(self.istate)))\n\n # find the accelerations and jacobian\n state_deriv = self.force_model.ode(0, ad_state)\n a_matrix = jacobian(self.force_model.ode(0, ad_state),\n ad_state)\n\n # compute the derivative of the STM and repackage\n phid = np.matmul(a_matrix, phi)\n phid_flat = phid.flatten()\n z_out = np.concatenate((state_deriv, phid_flat))\n\n return z_out", "def test_tte2(self):\n filename = str(self.temp_j2k_filename)\n xtx2_setup(filename)\n self.assertTrue(True)", "def create_tvel_file(\n depth: np.array,\n vp: np.array,\n vs: np.array,\n dens: np.array,\n save_folder: str,\n name: str = \"Test\",\n):\n\n assert (\n len(depth) == len(vp) and len(depth) == len(vs) and len(depth) == len(dens)\n ), \"All arrays (depth, vp, vs and dens) should be of same length\"\n\n \"\"\" combining all the data vector \"\"\"\n data = np.vstack((np.vstack((np.vstack((depth, vp)), vs)), dens)).T\n\n with open(join(save_folder, f\"{name}.tvel\"), \"w\") as f:\n f.write(\"# Input file for TauP\\n\")\n f.write(\"NAME TAYAK_BKE\\n\")\n for line in data:\n f.write(f\"{line[0]:8.2f}{line[1]:8.3f}{line[2]:8.3f}{line[3]:8.3f}\\n\")\n f.write(\n \"\"\" 1596.98 4.986 0.000 5.855\n 1853.05 5.150 0.000 6.025\n 2109.13 5.284 0.000 6.166\n 2365.20 5.393 0.000 6.280\n 2621.27 5.475 0.000 6.368\n 2877.35 5.534 0.000 6.430\n 3133.42 5.569 0.000 6.467\n 3389.50 5.569 0.000 6.467\"\"\"\n )\n f.close()", "def test_encrypt_decrypt(self):\n with open(self.file_path, \"rt\") as file:\n start_file = file.read()\n nonce1 = globals.generate_random_nonce()\n nonce2 = globals.generate_random_nonce()\n encrypted_file_path, additional_data = self.file_crypt.encrypt_file(\n self.file_path,\n nonce1,\n nonce2)\n file_decrypted = self.file_crypt.decrypt_file(\n file_path=encrypted_file_path,\n additional_data=additional_data)\n with open(file_decrypted, \"rt\") as file:\n end_file = file.read()\n self.assertEqual(start_file, end_file, \"Files differ!\")", "def test_create_tpm(self):\n command_line = self._MENU + [self._POOLNAME] + self._DEVICES + [\"--clevis=tpm2\"]\n TEST_RUNNER(command_line)", "def __output_encrypted(self, data, key_len, filename, iv):\n with open(filename, \"w\") as f:\n 
f.write(START_HEADER + \"\\n\")\n\n key = \"Description\"\n val = \"Crypted file\"\n f.write(self.gen_key_val(key, val))\n\n key = \"Method\"\n val = \"AES\"\n f.write(self.gen_key_val(key, val))\n\n key = \"File name\"\n val = filename\n f.write(self.gen_key_val(key, val))\n\n key = \"IV\"\n val = binascii.hexlify(iv)\n f.write(self.gen_key_val(key, val))\n\n key = \"Data\"\n val = base64.b64encode(data)\n # val = data\n f.write(self.gen_key_val(key, val))\n\n f.write(END_HEADER + \"\\n\")", "def _decrypt(self):\n self._outfile = os.path.join(self.dest, self.plain_file)\n self._infile = self.encrypted_file\n self._log.info(\"Decrypting file '%s' to '%s'\", self.encrypted_file, self._outfile)\n with open(self.encrypted_file, \"rb\") as enc_file:\n openssl(\n \"enc\",\n \"-aes-256-cbc\",\n \"-d\",\n \"-pass\",\n \"file:{secret}\".format(secret=self.secret.keyfile),\n _in=enc_file,\n _out=self._outfile,\n )\n self._log.info(\"File '%s' decrypted to '%s'\", self.encrypted_file, self._outfile)\n return True", "def t_xtide(*varargin):\n nargin = len(varargin)\n if nargin > 0:\n varargin = varargin[0]\n if not os.path.exists('t_xtide.mat'):\n # Read the harmonics file and make a mat file\n filnam = '/usr/share/xtide/harmonics.txt'\n fprintf(\"\\\\n********Can't find mat-file t_xtide.mat ********\\\\n\\\\n\")\n fprintf('Attempting to generate one from an xtide harmonics file....\\\\n\\\\n')\n fprintf('Latest version available from http://bel-marduk.unh.edu/xtide/files.html\\\\n\\\\n')\n # Input name\n fid = - 1\n while fid == - 1:\n\n rep = filnam\n while (lower(rep[0]) != 'y'):\n\n filnam = rep\n rep = 'n'\n rep = input_('Harmonics filename: ' + filnam + '? (y/Y/new file name):', 's')\n if (0 in rep.shape):\n rep = 'y'\n\n fid = open(filnam)\n if fid == - 1:\n fprintf(\"\\\\n****** Can't open filename ->\" + filnam + '<-\\\\n\\\\n')\n\n fprintf('Reading harmonics file (this will take a while)\\\\n')\n xtide, xharm = read_xtidefile(fid) # nargout=2\n fprintf('Saving harmonic information to t_xtide.mat\\\\n')\n savemat('t_xtide', 'xtide', 'xharm')\n else:\n loadmat('t_xtide',matlab_compatible=True)\n if nargin > 0:\n if isstr(varargin[0]):\n # Station name given\n # Identify station - look for exact match first\n ista = strmatch(lower(varargin[0]), lower(xharm.station), 'exact')\n # otherwise go for partial matches\n if (0 in ista.shape):\n # First check to see if a number was selected:\n inum = - 10\n while inum < - 1:\n\n inum = inum + 1\n ll = findstr(lower(varargin[0]), sprintf('(\\n %d)', - inum))\n if not (0 in ll.shape):\n inum = abs(inum)\n varargin[0] = deblank(varargin[0](range(1, (ll - 1 +1))))\n\n ista = strmatch(lower(varargin[0]), lower(xharm.station))\n if max(ista.shape) > 1:\n if inum > 0 & inum <= max(ista.shape):\n ista = ista[(inum -1)]\n else:\n fprintf('Ambiguous Station Choice - Taking first of:\\\\n')\n for kk in range(1, (max(ista.shape) +1)):\n fprintf('\\n %5d: \\n %s\\\\n', ista[(kk -1)], deblank(xharm.station(ista[(kk -1)], :)))\n fprintf(' Long: \\n %.4f Lat: \\n %.4f \\\\n', xharm.longitude(ista[(kk -1)]), xharm.latitude(ista[(kk -1)]))\n fprintf('\\\\n')\n ista = ista[0]\n else:\n if max(ista.shape) == 1 & inum > 1:\n fprintf(\"***Can't find variant (\\n %d) of station - Taking only choice\\\\n\", inum)\n else:\n if max(ista.shape) == 0:\n error('Could not match station')\n varargin[0] = np.array([])\n else:\n # Lat/long?\n dist, hdg = t_gcdist(xharm.latitude, xharm.longitude, varargin[1], varargin[0]) # nargout=2\n mind, ista = np.min(dist) # nargout=2\n if 
max(ista.shape) > 1:\n fprintf('Ambiguous Station Choice - Taking first of:\\\\n')\n for kk in range(1, (max(ista.shape) +1)):\n fprintf('\\n %5d: \\n %s\\\\n', ista[(kk -1)], deblank(xharm.station(ista[(kk -1)], :)))\n fprintf(' Long: \\n %.4f Lat: \\n %.4f \\\\n', xharm.longitude(ista[(kk -1)]), xharm.latitude(ista[(kk -1)]))\n fprintf('\\\\n')\n ista = ista[0]\n else:\n fprintf('\\n %5d: \\n %s\\\\n', ista, deblank(xharm.station(ista, :)))\n fprintf(' Long: \\n %.4f Lat: \\n %.4f \\\\n', xharm.longitude(ista), xharm.latitude(ista))\n varargin[0:2] = np.array([])\n # Time vector (if available) otherwise take current time.\n if max(varargin.shape) > 0 & not isstr(varargin[0]):\n tim = varargin[0]\n tim = tim[:].T\n varargin[0] = np.array([])\n if max(tim.shape) == 1:\n if tim < 1000:\n dat = clock\n tim = datenum(dat[0], dat[1], dat[2]) + np.array([range(0, (tim +1), 1 / 48)]).reshape(1, -1)\n else:\n tim = tim + np.array([range(0, 3, 1 / 48)]).reshape(1, -1)\n # 2 days worth.\n else:\n dat = clock\n tim = datenum(dat[0], dat[1], dat[2]) + np.array([range(0, 49, 0.25)]).reshape(1, -1) / 24\n # Parse properties\n format_ = 'raw'\n unt = 'original'\n k = 1\n while max(varargin.shape) > 0:\n\n if 'for' == lower(varargin[-1](range(1, 4))):\n format_ = lower(varargin[1])\n else:\n if 'uni' == lower(varargin[-1](range(1, 4))):\n unt = lower(varargin[1])\n else:\n error(\"Can't understand property:\" + varargin[0])\n varargin[(np.array([1, 2]).reshape(1, -1) -1)] = np.array([])\n\n # if we want a time series\n pred = np.array([])\n # Convert units if requested.\n units, convf = convert_units(unt, xharm.units(ista, :)) # nargout=2\n if format_[0:2] == 'ra' | format_[0:2] == 'fu' | format_[0:2] == 'ti':\n # Data every minute for hi/lo forecasting.\n if format_[0:2] == 'ti':\n tim = range(tim[0], (tim[-1] +1), (1 / 1440))\n # Convert into time since the beginning of year\n mid = datevec(mean(tim))\n iyr = mid[0] - xtide.startyear + 1\n lt = max(tim.shape)\n xtim = np.dot((tim - datenum(mid[0], 1, 1)), 24)\n # Hours since beginning of year\n #-----------------------------------------------------\n # Sum up everything for the prediction!\n pred = xharm.datum(ista) + np.sum(repmat(xtide.nodefactor(:, iyr) * xharm.A(ista, :).T, 1, lt) * cos(np.dot((np.dot(xtide.speed, xtim) + repmat(xtide.equilibarg(:, iyr) - xharm.kappa(ista, :).T, 1, lt)), (pi / 180))), 1)\n #-----------------------------------------------------\n pred = np.dot(pred, convf)\n # Compute times of hi/lo from every-minute data\n if format_[0:2] == 'ti':\n # Check if this is a current station\n if not (0 in findstr('Current', xharm.station(ista, :)).shape):\n currents = 1\n else:\n currents = 0\n dpred = diff(pred)\n ddpred = diff(dpred > 0)\n flat = np.flatnonzero(ddpred != 0) + 1\n slk = np.flatnonzero(sign(pred[0:pred.shape[0] - 1]) != sign(pred[1:pred.shape[0]]))\n hi.mtime = tim[(flat -1)]\n hi.value = pred[(flat -1)]\n hi.type = np.zeros(shape=(flat.shape, flat.shape), dtype='float64')\n hi.type(np.flatnonzero(ddpred[(flat - 1 -1)] < 0)) = 1\n # 0=lo, 1=hi\n hi.units = deblank(units)\n pred = hi\n # Create information structure\n if format_[0:2] == 'in' | format_[0:2] == 'fu':\n if not (0 in pred.shape):\n pred.yout = pred\n pred.mtime = tim\n else:\n kk = np.flatnonzero(xharm.A(ista, :) != 0)\n pred.freq = xtide.name(kk, :)\n pred.A = np.dot(full(xharm.A(ista, kk).T), convf)\n pred.kappa = full(xharm.kappa(ista, kk).T)\n pred.station = deblank(xharm.station(ista, :))\n pred.longitude = xharm.longitude(ista)\n pred.latitude = 
xharm.latitude(ista)\n pred.timezone = xharm.timezone(ista)\n pred.units = deblank(units)\n pred.datum = np.dot(xharm.datum(ista), convf)\n # If no output parameters then we plot or display things\n if nargout == 0:\n if 'ti' == format_[(((0:2 -1) -1) -1)]:\n fprintf('High/Low Predictions for \\n %s\\\\n', xharm.station(ista, :))\n fprintf('Time offset \\n %.1f from UTC\\\\n\\\\n', xharm.timezone(ista))\n outstr = repmat(' ', max(flat.shape), 41)\n outstr[:, 0:20] = datestr(hi.mtime)\n outstr[:, 21:27] = reshape(sprintf('\\n %6.2f', hi.value), 6, max(flat.shape)).T\n if currents:\n ll = hi.type == 1\n outstr[(ll -1), 30:41] = repmat(' Flood Tide', np.sum(ll), 1)\n ll = hi.type == 0\n outstr[(ll -1), 30:41] = repmat(' Ebb Tide ', np.sum(ll), 1)\n else:\n ll = hi.type == 1\n outstr[(ll -1), 30:41] = repmat(' High Tide ', np.sum(ll), 1)\n ll = hi.type == 0\n outstr[(ll -1), 30:41] = repmat(' Low Tide ', np.sum(ll), 1)\n disp(outstr)\n else:\n if 'ra' == format_[(((0:2 -1) -1) -1)]:\n plot(tim, pred)\n datetick\n title('Tidal prediction for ' + deblank(xharm.station(ista, :)) + ' beginning ' + datestr(tim[0]))\n ylabel(deblank(xharm.units(ista, :)))\n else:\n if 'fu' == format_[(((0:2 -1) -1) -1)]:\n plot(tim, pred.yout)\n datetick\n title('Tidal prediction for ' + deblank(xharm.station(ista, :)) + ' beginning ' + datestr(tim[0]))\n ylabel(deblank(xharm.units(ista, :)))\n else:\n if 'in' == format_[(((0:2 -1) -1) -1)]:\n fprintf('Station: \\n %s\\\\n', pred.station)\n if pred.longitude < 0:\n lon = 'W'\n else:\n lon = 'E'\n if pred.latitude < 0:\n lat = 'S'\n else:\n lat = 'N'\n fprintf(\"Location: \\n %d \\n %.1f' \\n %c, \\n %d \\n %.1f' \\n %c\\\\n\", fix(abs(pred.latitude)), np.dot(rem(abs(pred.latitude), 1), 60), lat, fix(abs(pred.longitude)), np.dot(rem(abs(pred.longitude), 1), 60), lon)\n fprintf('Time offset \\n %.1f from UTC\\\\n\\\\n', pred.timezone)\n clear('pred')\n #\n return pred", "def decryptor(infile: str, outfile: str, password: str, mode: str) -> int:\n\n dec = Decrypt(infile)\n\n if mode.upper() == 'AES':\n decrypted_data = dec.AES(password)\n elif mode.upper() == 'DES':\n decrypted_data = dec.DES(password)\n elif mode.upper() == 'SALSA20':\n decrypted_data = dec.Salsa20(password)\n else:\n return 2\n\n if not decrypted_data:\n cleanup(outfile)\n return 3\n\n if not outfile.endswith(dec.extension):\n outfile += dec.extension\n write_data(decrypted_data, outfile)\n return 0", "def _dK_ode_dtheta(self, target):\r\n t_ode = self._t[self._index>0]\r\n dL_dK_ode = self._dL_dK[self._index>0, :]\r\n index_ode = self._index[self._index>0]-1\r\n if self._t2 is None:\r\n if t_ode.size==0:\r\n return \r\n t2_ode = t_ode\r\n dL_dK_ode = dL_dK_ode[:, self._index>0]\r\n index2_ode = index_ode\r\n else:\r\n t2_ode = self._t2[self._index2>0]\r\n dL_dK_ode = dL_dK_ode[:, self._index2>0]\r\n if t_ode.size==0 or t2_ode.size==0:\r\n return\r\n index2_ode = self._index2[self._index2>0]-1\r\n\r\n h1 = self._compute_H(t_ode, index_ode, t2_ode, index2_ode, stationary=self.is_stationary, update_derivatives=True)\r\n #self._dK_ddelay = self._dh_ddelay\r\n self._dK_dsigma = self._dh_dsigma\r\n\r\n if self._t2 is None:\r\n h2 = h1\r\n else:\r\n h2 = self._compute_H(t2_ode, index2_ode, t_ode, index_ode, stationary=self.is_stationary, update_derivatives=True)\r\n\r\n #self._dK_ddelay += self._dh_ddelay.T\r\n self._dK_dsigma += self._dh_dsigma.T\r\n # C1 = self.sensitivity\r\n # C2 = self.sensitivity\r\n\r\n # K = 0.5 * (h1 + h2.T)\r\n # var2 = C1*C2\r\n # if self.is_normalized:\r\n # dk_dD1 = 
(sum(sum(dL_dK.*dh1_dD1)) + sum(sum(dL_dK.*dh2_dD1.T)))*0.5*var2\r\n # dk_dD2 = (sum(sum(dL_dK.*dh1_dD2)) + sum(sum(dL_dK.*dh2_dD2.T)))*0.5*var2\r\n # dk_dsigma = 0.5 * var2 * sum(sum(dL_dK.*dK_dsigma))\r\n # dk_dC1 = C2 * sum(sum(dL_dK.*K))\r\n # dk_dC2 = C1 * sum(sum(dL_dK.*K))\r\n # else:\r\n # K = np.sqrt(np.pi) * K\r\n # dk_dD1 = (sum(sum(dL_dK.*dh1_dD1)) + * sum(sum(dL_dK.*K))\r\n # dk_dC2 = self.sigma * C1 * sum(sum(dL_dK.*K))\r\n\r\n\r\n # dk_dSim1Variance = dk_dC1\r\n # Last element is the length scale.\r\n (dL_dK_ode[:, :, None]*self._dh_ddelay[:, None, :]).sum(2)\r\n\r\n target[-1] += (dL_dK_ode*self._dK_dsigma/np.sqrt(2)).sum()\r\n\r\n\r\n # # only pass the gradient with respect to the inverse width to one\r\n # # of the gradient vectors ... otherwise it is counted twice.\r\n # g1 = real([dk_dD1 dk_dinvWidth dk_dSim1Variance])\r\n # g2 = real([dk_dD2 0 dk_dSim2Variance])\r\n # return g1, g2\"\"\"\r", "def decryptor(file_name, key):\n\twith open(file_name, 'rb') as dfile:\n\t\tciphertext = dfile.read()\n\t\tdec = decrypt(key, ciphertext)\n\t\tdfile.close()\n\t\tdtext = \"The encrypted file was opened by macupdate.py by the user: \"\n\t\tcreateLog(dtext, 'logs/macupdate.log')\n\t\treturn dec", "def digital_temp_data(self): # This function will give the initial digital format for temperature data \n self._bus.write_byte(self._addr, 0x58) \n time.sleep(0.05) \n tempadcbytes = self._bus.read_i2c_block_data(self._addr, 0x00) \n time.sleep(0.05) \n self.tempadc=tempadcbytes[0]*65536.0+tempadcbytes[1]*256.0+tempadcbytes[2]", "def example():\r\n path = os.path.abspath(os.path.dirname(__name__))\r\n module = CryptoModule()\r\n # create_name this is open source py module with confidential information\r\n opened_path = os.path.join(path, 'secret.py')\r\n # read_name this is open encrypted py module with confidential information\r\n secured_path = os.path.join(path, 'secured.py')\r\n # encrypt, read secret.py and create secured.py\r\n module.create_secured_module(path_to_opened_module=opened_path, path_to_secured_module=secured_path,\r\n create_key=True, delete_source_opened_module=False)\r\n # decrypt, read secured.py and create opened.py\r\n module.create_opened_module(path_to_secured_module=secured_path, path_to_opened_module=opened_path)\r\n print('ok')", "def test_tte1(self):\n filename = str(self.temp_j2k_filename)\n self.xtx1_setup(filename)", "def Elevate(self):\n self.Send(self.EncryptString('elevate\\n'))\n print self.DecryptString(self.Recv(4096))\n\n self.Send(self.EncryptString(self.flag_2))\n print self.DecryptString(self.Recv(4096))\n\n self.Send(self.EncryptString('RocketDonkey\\n'))\n print self.DecryptString(self.Recv(4096))", "def detx(self, det_id, t0set=None, calibration=None):\n url = 'detx/{0}?'.format(det_id) # '?' 
since it's ignored if no args\n if t0set is not None:\n url += '&t0set=' + t0set\n if calibration is not None:\n url += '&calibrid=' + calibration\n\n detx = self._get_content(url)\n return detx", "def unfreeze_rotation(self):\n self.android_device_driver.adb.exec_adb_cmd(\n \"shell content insert --uri content://settings/system --bind name:s:accelerometer_rotation --bind value:i:1\"\n ).wait()", "def main():\n \n # Ask for their option.\n \n inputFile = \"\"\n outputFile = \"\"\n \n choice = askOption()\n key = askForKey()\n \n inputFile = askInputFile()\n inputText = readText(inputFile)\n \n outputFile = askOutputFile()\n \n #Start the timer here.\n startTimer = time.time()\n \n # Depending on their choice, encode or decode.\n if choice == 'e':\n encryptedText = RouteCipher.encrypt(inputText, key)\n writeText(encryptedText, outputFile)\n elif choice == 'd':\n decryptedText = RouteCipher.decrypt(inputText, key)\n writeText(decryptedText, outputFile)\n \n finishTimer = time.time()\n totalTime = round(finishTimer - startTimer, 2)\n \n print(\"The operation was succesful\")\n print(f\"Total time needed: {totalTime}\")", "def StoreAntirollback(now, ar_filename, kern_f):\n print 'antirollback time now ' + str(now)\n sys.stdout.flush()\n kern_f.write(str(now))\n kern_f.flush()\n tmpdir = os.path.dirname(ar_filename)\n with tempfile.NamedTemporaryFile(mode='w', dir=tmpdir, delete=False) as f:\n f.write(str(now) + '\\n')\n f.flush()\n os.fsync(f.fileno())\n os.rename(f.name, ar_filename)", "def test_dfunction_saveable(self):\n \n wa = FrequencyAxis(0.0, 1000, 0.1)\n \n fw = numpy.exp(-wa.data)\n \n fce = DFunction(wa,fw)\n \n #fce.plot()\n\n #with h5py.File(\"test_file_1\",driver=\"core\", \n # backing_store=False) as f:\n with tempfile.TemporaryFile() as f:\n \n fce.save(f, test=True)\n \n fce2 = DFunction()\n fce2 = fce2.load(f, test=True)\n \n #fce2.plot()\n \n numpy.testing.assert_array_equal(fce.data, fce2.data)", "def vmdexec(cmds):\n handle,filename=mkstemp(dir='/tmp')\n open(filename,'w').write(cmds)\n os.system('vmd -dispdev text -e %s'%filename) # run vmd in the terminal\n os.system('/bin/rm %s'%filename) # clean-up", "def teleopInit(self):\n # self.drive.setSafetyEnabled(True)\n self.compressor.start()\n pass", "async def describe_dbinstance_tdeinfo_with_options_async(\n self,\n request: dds_20151201_models.DescribeDBInstanceTDEInfoRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceTDEInfoResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceTDEInfo',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n 
dds_20151201_models.DescribeDBInstanceTDEInfoResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def construct_TDI(self, t, Orbit):\n\t\n\tself.make_padded_delta_l(t)\n\n\tp12 = td.Phase(1,2, t, self.delta_l_padded[0,1,:])\n\tp21 = td.Phase(2,1, t, self.delta_l_padded[1,0,:])\n\n\tp13 = td.Phase(1,3, t, self.delta_l_padded[0,2,:])\n\tp31 = td.Phase(3,1, t, self.delta_l_padded[2,0,:])\n\n\tp23 = td.Phase(2,3, t, self.delta_l_padded[1,2,:])\n\tp32 = td.Phase(3,2, t, self.delta_l_padded[2,1,:])\n \n\tp12.FT_phase(Orbit)\n\tp21.FT_phase(Orbit)\n\tp13.FT_phase(Orbit)\n\tp31.FT_phase(Orbit)\n\tp23.FT_phase(Orbit)\n\tp32.FT_phase(Orbit)\n\n\ttdi_GW = td.TDI(p12, p21, p13, p31, p23, p32, Orbit)\n\t\n\treturn tdi_GW", "def dftb_dftd3(third_ord, damp_flag, damp_exp):\n dftb_dftd3=\"\"\"\n ThirdOrderFull = {{ third_ord }}\n DampXH = {{ damp_flag }}\n DampXHExponent = {{ damp_exp }}\n Dispersion = DftD3{}\n}\n \"\"\"\n return Environment().from_string(dftb_dftd3).render(third_ord=third_ord, damp_flag=damp_flag, damp_exp=damp_exp)", "def test_encrypt_decrypt(self):\n reference = get_random_test_tensor()\n encrypted_tensor = SharedTensor(reference)\n self._check(encrypted_tensor, reference, 'en/decryption failed')", "def test_default_run_ubuntu_keep_vdmx():\n test_dir = os.path.join(\"tests\", \"test_files\", \"fonts\", \"temp\")\n notouch_inpath = os.path.join(\"tests\", \"test_files\", \"fonts\", \"Ubuntu-Regular.ttf\")\n test_inpath = os.path.join(\n \"tests\", \"test_files\", \"fonts\", \"temp\", \"Ubuntu-Regular.ttf\"\n )\n test_outpath = os.path.join(\n \"tests\", \"test_files\", \"fonts\", \"temp\", \"Ubuntu-Regular-dehinted.ttf\"\n )\n test_args = [test_inpath, \"--keep-vdmx\"]\n\n # setup\n if os.path.isdir(test_dir):\n shutil.rmtree(test_dir)\n os.mkdir(test_dir)\n shutil.copyfile(notouch_inpath, test_inpath)\n\n # execute\n run(test_args)\n\n # test\n tt = TTFont(test_outpath)\n assert \"VDMX\" in tt\n\n # tear down\n shutil.rmtree(test_dir)", "def direct_mode_test(self,earfcn,bwMhz,powerdBm,ud_config,sf_sweep=False,with_rx=False):\r\r\n\r\r\n self.meas_list = ['FREQ_ERR','IQ_OFFSET', 'EVM']\r\r\n tol_dB = 1\r\r\n\r\r\n bursted = self.setup_tdd(earfcn,bwMhz,powerdBm,ud_config,with_rx=with_rx)\r\r\n\r\r\n self.GetTesterMeasurments(exp_powerdBm=powerdBm,\r\r\n tol_dB=tol_dB)\r\r\n\r\r\n # Note - Direct AGC value leads to different powers on different platforms\r\r\n # -- use driver mode and read back AGC value to get baseline,\r\r\n # then try that value in direct mode.\r\r\n dac_value = self.modemObj.query_txagc()\r\r\n\r\r\n # Set minimum power\r\r\n self.modemObj.set_txagc_dbm(value=-70)\r\r\n self.GetTesterMeasurments(exp_powerdBm=-9999,\r\r\n tol_dB=0,\r\r\n exp_no_signal=True)\r\r\n\r\r\n # Set the original power, but as a direct gain DAC word this time.\r\r\n self.modemObj.set_txagc_direct(value=dac_value)\r\r\n\r\r\n sf_sweep = bursted and sf_sweep\r\r\n meas_sf_list = range(10) if sf_sweep else [2] # 2 is always UL\r\r\n for meas_sf in meas_sf_list:\r\r\n self.set_meas_sf(meas_sf)\r\r\n self.instr.lte_tx.conf_measurement_subframe(measSubframe=meas_sf)\r\r\n\r\r\n if sf_is_uplink(ud_config, meas_sf):\r\r\n self.GetTesterMeasurments(exp_powerdBm=powerdBm,\r\r\n tol_dB=tol_dB)\r\r\n else:\r\r\n # Non-UL subframe, do not expect signal\r\r\n self.GetTesterMeasurments(exp_powerdBm=-9999,\r\r\n tol_dB=0,\r\r\n exp_no_signal=True)\r\r\n\r\r\n meas_sf = 2\r\r\n self.set_meas_sf(meas_sf)\r\r\n self.instr.lte_tx.conf_measurement_subframe(measSubframe=meas_sf)\r\r\n\r\r\n # Check 
going back to driver mode\r\r\n self.modemObj.set_txagc_dbm(value=-70)\r\r\n self.GetTesterMeasurments(exp_powerdBm=-9999,\r\r\n tol_dB=0,\r\r\n exp_no_signal=True)\r\r\n self.modemObj.set_txagc_dbm(value=powerdBm)\r\r\n self.GetTesterMeasurments(exp_powerdBm=powerdBm,\r\r\n tol_dB=tol_dB)", "def test_tdwr():\n f = Level3File(get_test_data('nids/Level3_SLC_TV0_20160516_2359.nids'))\n assert f.prod_desc.prod_code == 182", "def main():\n print(\"Reading from config.json\")\n download_decrypt_store = DownloadDecryptStore()\n print(\"Downloading key from storage-bucket\")\n file_path = download_decrypt_store.download_key_from_blob()\n print(\"Decrypting downloaded file\")\n download_decrypt_store.decrypt_from_file(file_path)\n print(\"Completed\")", "def de_cryptate(self):\r\n\r\n intab1 = \"abcdefghijklomnopqrstuvwxyz !,.\"\r\n outtab1 = \"?2p=o)7i(u9/y&t3%r¤5e#w1q!>*'^;)\"\r\n# Fetching from written in textbox\r\n s = self.textbox.toPlainText()\r\n a = s.lower()\r\n# Changing out the letters/numbers/etc\r\n crypted = (a.translate({ord(x): y for (y, x) in zip(intab1, outtab1)}))\r\n# Clear the textbox\r\n self.textbox.clear()\r\n# Write the Decrypted text\r\n self.textbox.setPlainText(crypted)", "def describe_dbinstance_tdeinfo_with_options(\n self,\n request: dds_20151201_models.DescribeDBInstanceTDEInfoRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceTDEInfoResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceTDEInfo',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstanceTDEInfoResponse(),\n self.call_api(params, req, runtime)\n )", "def test_tte5(self):\n filename = str(self.temp_j2k_filename)\n xtx5_setup(filename)\n self.assertTrue(True)", "def configure_disable_aes_encryption(device):\n dialog = Dialog(\n [\n Statement(\n pattern=r\".*Continue\\s*with\\s*master\\s*key\\s*deletion.*\",\n action=\"sendline(yes)\",\n loop_continue=True,\n continue_timer=False,\n )\n ]\n )\n try:\n device.configure(\"no key config-key password-encrypt\", reply=dialog)\n device.configure(\"no password encryption aes\")\n except SubCommandFailure as e:\n raise SubCommandFailure(\n \"Could not remove aes password encryption on {device}.\\nError: {e}\".format(\n device=device.name, e=str(e))\n )", "def do_android_decryption(self):\r\n self.aes_decryption_key = self.extract_aes_key()\r\n self.decrypt_device_file()\r\n # join is optimized and does not cause O(n^2) total memory copies.\r\n self.decrypted_file = b\"\\n\".join(self.good_lines)", "def encrypt(self,timerPrinting=False):\n\n\t\tt = time.time() \n\t\tif self.extension 
not in self.path:\n\t\t\twith open(self.path,'rb') as infile:\n\t\t\t\tfile_data = infile.read()\n\t\t\t#Start To CHecking The PlatForm\n\t\t\t# if platform.system() == \"Windows\":\n\t\t\t# \tself.path_dir = self.path.split(\"\\\\\")[-1]\n\t\t\t# elif platform.system() == \"Linux\":\n\t\t\t# \tself.path_dir = self.path.split('/')[-1]\n\t\t\t# #End Checking Wich Platform\n\t\t\t# print('Encryption of '+self.path_dir+'...')\n\t\t\t# print('It\\'s may take a will')\n\t\t\t################################### Blowfish Algorithm ##############################\n\t\t\tbs = Blowfish.block_size\n\t\t\tiv = Random.new().read(bs)\n\t\t\tpadding = b\"}\"\n\t\t\tp = lambda s: s+(bs - len(s) % bs )*padding\n\t\t\tc= Blowfish.new(self.key, Blowfish.MODE_CBC, iv)\n\t\t\tencrypt = iv + c.encrypt(p(file_data))\n\t\t\tself.encrypt = base64.b64encode(encrypt) \n\t\t\t################################################################\n\t\t\t#print(\"writing in your file ...\")\n\t\t\tos.remove(self.path)\n\t\t\twith open(self.path + self.extension,\"wb\") as newfile:\n\t\t\t\tnewfile.write(self.encrypt)\n\t\t\tif timerPrinting:\n\t\t\t\tprint('Done In '+ time.time() -t)\n\t\telse:\n\t\t\tprint('The File is already encrypt.')", "def test_encryption_cycle_default_algorithm_non_framed_no_encryption_context(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"], key_provider=self.kms_master_key_provider, frame_length=0\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]", "def disable_tee(self):\n self._tee = False", "def encryptor(file_name, key, plaintext):\n\twith open(file_name, 'w') as efile:\n\t\tenc = encrypt(key, plaintext)\n\t\tefile.write(enc)\n\t\tefile.close()\n\t\tetext = \"An encrypted passfile was created named key.enc for further use in this script by the user: \"\n\t\tcreateLog(etext, 'logs/macupdate.log')", "def test_storage_truncation(tmp_path):\n file = tmp_path / \"test_storage_truncation.hdf5\"\n for truncate in [True, False]:\n storages = [MemoryStorage()]\n if module_available(\"h5py\"):\n storages.append(FileStorage(file))\n tracker_list = [s.tracker(interval=0.01) for s in storages]\n\n grid = UnitGrid([8, 8])\n state = ScalarField.random_uniform(grid, 0.2, 0.3)\n eq = DiffusionPDE()\n\n eq.solve(state, t_range=0.1, dt=0.001, tracker=tracker_list)\n if truncate:\n for storage in storages:\n storage.clear()\n eq.solve(state, t_range=[0.1, 0.2], dt=0.001, tracker=tracker_list)\n\n times = np.arange(0.1, 0.201, 0.01)\n if not truncate:\n times = np.r_[np.arange(0, 0.101, 0.01), times]\n for storage in storages:\n msg = f\"truncate={truncate}, storage={storage}\"\n np.testing.assert_allclose(storage.times, times, err_msg=msg)\n\n if any(platform.win32_ver()):\n for storage in storages:\n if isinstance(storage, FileStorage):\n storage.close()\n\n assert not storage.has_collection", "def test_decryption(d, c):\n\n#\td = int(raw_input(\"\\nEnter d from public key\\n\"))\n#\tc = int(raw_input(\"\\nEnter c from public key\\n\"))\n\n x = int(raw_input(\"\\nEnter number to decrypt\\n\"))\n decode(endecrypt(x, d, c))", "def change_TTS_engine(self):\n\t\t\n\t\tif self.isActiveDualTTS:\n\t\t\t#dual TTS\n\t\t\tpath_to_file = '/var/lib/snips/skills/snips_app_pilldispenser/settings/dual_TTS.sh'\n\t\t\tsubprocess.call([path_to_file])\n\t\t\tprint('Dual TTS is enabled. 
Using Amazon Polly TTS in case of internet connection, else use offline Picotts TTS.')\n\t\t\t\n\t\telse:\n\t\t\t#go back to single offline Picotts TTS\n\t\t\tpath_to_file = '/var/lib/snips/skills/snips_app_pilldispenser/settings/single_TTS.sh'\n\t\t\tsubprocess.call([path_to_file])\n\t\t\tprint('Dual TTS is disabled. Using offline Picotts TTS regardless of internect connection.')", "def test_secretbox_enc_dec(self):\n # Encrypt with sk\n encrypted_data = nacl.secretbox_encrypt(data=self.unencrypted_data, sk=self.sk)\n\n # Decrypt with sk\n decrypted_data = nacl.secretbox_decrypt(data=encrypted_data, sk=self.sk)\n\n self.assertEqual(self.unencrypted_data, decrypted_data)", "def test_ttd1(self):\n filename = str(self.temp_j2k_filename)\n\n # Produce the tte0 output file for ttd0 input.\n self.xtx1_setup(filename)\n\n kwargs = {'x0': 0,\n 'y0': 0,\n 'x1': 128,\n 'y1': 128,\n 'filename': filename,\n 'codec_format': openjp2.CODEC_J2K}\n tile_decoder(**kwargs)\n self.assertTrue(True)", "def test_ttd0(self):\n filename = str(self.temp_j2k_filename)\n ttx0_setup(filename)\n\n kwargs = {'x0': 0,\n 'y0': 0,\n 'x1': 1000,\n 'y1': 1000,\n 'filename': filename,\n 'codec_format': openjp2.CODEC_J2K}\n tile_decoder(**kwargs)\n self.assertTrue(True)", "def freeze_rotation(self):\n self.android_device_driver.adb.exec_adb_cmd(\n \"shell content insert --uri content://settings/system --bind name:s:accelerometer_rotation --bind value:i:0\"\n ).wait()", "def setupForFTK(self):\n t1 = self.getKeyword('ISS CONF T1NAME').strip()\n t2 = self.getKeyword('ISS CONF T2NAME').strip()\n #swapped = self.getKeyword('ISS PRI STS'+t1[2]+' GUIDE_MODE').strip()\n\n fsub_pos_fri = self.maxSnrInScan(fsu='FSUB', opdc='OPDC', plot=1)\n fsua_pos_fri = self.maxSnrInScan(fsu='FSUA', opdc='OPDC', plot=2)\n print '---{'+self.insmode+'}---'\n if swapped == 'NORMAL':\n print ' OPDC -> [ZPD offset] x [sign',self.DLtrack,\\\n '] =',-fsub_pos_fri\n print 'DOPDC -> [ZPD offset] x [sign',\\\n self.getKeyword('ISS DDL1 NAME').strip(),\\\n '] = ',(fsub_pos_fri-fsua_pos_fri)\n else:\n print ' OPDC -> [ZPD offset] x [sign',self.DLtrack,\\\n '] =', fsua_pos_fri\n print 'DOPDC -> [ZPD offset] x [sign',\\\n self.getKeyword('ISS DDL2 NAME').strip(),\\\n '] = ',(fsua_pos_fri-fsub_pos_fri)\n return", "def setup_tdelta(self, dir1: str, num1: int, pos1: str, dir2: str, num2: int, pos2: str) -> None:\n cmd = ':measure:define deltatime,{0},{1},{2},{3},{4},{5}'.format(dir1, num1, pos1, dir2, num2, pos2)\n self.write(cmd)", "def decrypt(self, payload):\r\n\r\n #print(b'payload: %s'%(payload))\r\n decrypt1 = aes(self.ivkey, 2, self.staticiv)\r\n iv = decrypt1.decrypt(b'%s'%(payload['eiv']))\r\n #print(b'iv : %s'%(iv))\r\n decrypt2 = aes(b'%s'%(self.datakey), 2, b'%s'%(iv))\r\n temp = decrypt2.decrypt(b'%s'%(payload['ed']))\r\n #print(b'data : %s'%(temp))\r\n x_accel = int.from_bytes(temp[:4],\"big\")\r\n y_accel = int.from_bytes(temp[4:8],\"big\")\r\n z_accel = int.from_bytes(temp[8:12],\"big\")\r\n temp = float(temp[12:])\r\n print(x_accel,y_accel,z_accel,temp)\r\n temp1 = dict()\r\n \r\n temp1[\"value1\"] = str(x_accel)\r\n temp1[\"value2\"] = str(y_accel)\r\n temp1[\"value3\"] = str(z_accel)\r\n urequests.request(\"POST\", \"http://maker.ifttt.com/trigger/Spinner2/with/key/c6aKBXblAZ9tkL3Vu9tIlr\", json=temp1, headers={\"Content-Type\": \"application/json\"})\r\n temp1[\"value1\"] = str(temp)\r\n temp1[\"value2\"] = str(self.nodeid)\r\n temp1[\"value3\"] = str(self.sessionID)\r\n urequests.request(\"POST\", 
\"http://maker.ifttt.com/trigger/Spinner2/with/key/c6aKBXblAZ9tkL3Vu9tIlr\", json=temp1, headers={\"Content-Type\": \"application/json\"})\r\n temp1[\"value1\"] = ''\r\n temp1[\"value2\"] = ''\r\n temp1[\"value3\"] = ''\r\n urequests.request(\"POST\", \"http://maker.ifttt.com/trigger/Spinner2/with/key/c6aKBXblAZ9tkL3Vu9tIlr\", json=temp1, headers={\"Content-Type\": \"application/json\"})\r\n \r\n if self.x_accel == None or self.y_accel == None or self.z_accel == None:\r\n self.x_accel = x_accel\r\n self.y_accel = y_accel\r\n self.z_accel = z_accel\r\n \r\n elif abs(self.x_accel - x_accel) > 30 or abs(self.y_accel - y_accel) > 30 or abs(self.z_accel - z_accel) > 30:\r\n self.R_LED.value(1)\r\n self.x_accel = x_accel\r\n self.y_accel = y_accel\r\n self.z_accel = z_accel\r\n \r\n else:\r\n self.R_LED.value(0)\r\n self.x_accel = x_accel\r\n self.y_accel = y_accel\r\n self.z_accel = z_accel\r\n \r\n if self.temp == None:\r\n self.temp = temp\r\n \r\n elif abs(self.temp - temp) < 1:\r\n self.G_LED.freq(10)\r\n elif abs(self.temp - temp) >= 1:\r\n if 10 + (5 * int(temp - self.temp)) < 0:\r\n self.G_LED.freq(0)\r\n elif temp - self.temp <= -1:\r\n self.G_LED.freq(10 + (5 * int(temp - self.temp)))\r\n else:\r\n self.G_LED.freq(10 + (5 * int(temp - self.temp)))\r\n \r\n return \"Successful Decryption\"", "def fetch_taiwan_ntu_dsi():\n dipy_home = pjoin(os.path.expanduser('~'), '.dipy')\n uraw = 'http://dl.dropbox.com/u/2481924/taiwan_ntu_dsi.nii.gz'\n ubval = 'http://dl.dropbox.com/u/2481924/tawian_ntu_dsi.bval'\n ubvec = 'http://dl.dropbox.com/u/2481924/taiwan_ntu_dsi.bvec'\n ureadme = 'http://dl.dropbox.com/u/2481924/license_taiwan_ntu_dsi.txt'\n folder = pjoin(dipy_home, 'taiwan_ntu_dsi')\n\n md5_list = ['950408c0980a7154cb188666a885a91f', # data\n '602e5cb5fad2e7163e8025011d8a6755', # bval\n 'a95eb1be44748c20214dc7aa654f9e6b', # bvec\n '7fa1d5e272533e832cc7453eeba23f44'] # license\n\n url_list = [uraw, ubval, ubvec, ureadme]\n fname_list = ['DSI203.nii.gz', 'DSI203.bval', 'DSI203.bvec', 'DSI203_license.txt']\n\n if not os.path.exists(folder):\n print('Creating new directory %s' % folder)\n os.makedirs(folder)\n print('Downloading raw DSI data (91MB)...')\n\n for i in range(len(md5_list)):\n _get_file_data(pjoin(folder, fname_list[i]), url_list[i])\n check_md5(pjoin(folder, fname_list[i]), md5_list[i])\n\n print('Done.')\n print('Files copied in folder %s' % folder)\n print('See DSI203_license.txt for LICENSE.')\n print('For the complete datasets please visit :')\n print('http://dsi-studio.labsolver.org')\n\n else:\n print('Dataset is already in place. If you want to fetch it again, please first remove the folder %s ' % folder)", "def DirDE():\n\n global Asm\n\n if dec.Asm.Parse_Pointer == 0:\n # No parameter given\n errors.DoError('missoper', False)\n dec.Asm.New_Label = ''\n return\n\n register = -1\n reg = assem.GetWord().upper()\n if (len(reg) == 2 or len(reg) == 3) and reg[0] == 'R':\n # Can it be a register name? 
Must be 2 or 3 chars long and start with R\n reg = reg[1:]\n if reg.isdigit:\n # The register number must be numeric of course\n if len(reg) == 1 or reg[0] != '0':\n # It is numeric, without a leading 0\n register = int(reg)\n if register < 0 or register > 31:\n # It is not a legal register\n errors.DoError('badoper', False)\n dec.Asm.New_Label = ''\n else:\n # It is a legal register, set it's value\n dec.Asm.BOL_Address = register\n dec.Asm.List_Address = register\n dec.Asm.Mnemonic = '.SE' # Handle rest like .SE\n\n # Ignore more parameters this time (like .EQ).", "def kv_esx_init():\n disk_lib_init()", "def test_once(config, qemu_img=False):\n\n iotests.log(\"# ================= %s %s =================\" % (\n \"qemu-img\" if qemu_img else \"dm-crypt\", config))\n\n oneKB = 1024\n oneMB = oneKB * 1024\n oneGB = oneMB * 1024\n oneTB = oneGB * 1024\n\n # 4 TB, so that we pass the 32-bit sector number boundary.\n # Important for testing correctness of some IV generators\n # The files are sparse, so not actually using this much space\n image_size = 4 * oneTB\n if qemu_img:\n iotests.log(\"# Create image\")\n qemu_img_create(config, image_size / oneMB)\n else:\n iotests.log(\"# Create image\")\n create_image(config, image_size / oneMB)\n\n lowOffsetMB = 100\n highOffsetMB = 3 * oneTB / oneMB\n\n try:\n if not qemu_img:\n iotests.log(\"# Format image\")\n cryptsetup_format(config)\n\n for slot in config.active_slots()[1:]:\n iotests.log(\"# Add password slot %s\" % slot)\n cryptsetup_add_password(config, slot)\n\n # First we'll open the image using cryptsetup and write a\n # known pattern of data that we'll then verify with QEMU\n\n iotests.log(\"# Open dev\")\n cryptsetup_open(config)\n\n try:\n iotests.log(\"# Write test pattern 0xa7\")\n qemu_io_write_pattern(config, 0xa7, lowOffsetMB, 10, dev=True)\n iotests.log(\"# Write test pattern 0x13\")\n qemu_io_write_pattern(config, 0x13, highOffsetMB, 10, dev=True)\n finally:\n iotests.log(\"# Close dev\")\n cryptsetup_close(config)\n\n # Ok, now we're using QEMU to verify the pattern just\n # written via dm-crypt\n\n iotests.log(\"# Read test pattern 0xa7\")\n qemu_io_read_pattern(config, 0xa7, lowOffsetMB, 10, dev=False)\n iotests.log(\"# Read test pattern 0x13\")\n qemu_io_read_pattern(config, 0x13, highOffsetMB, 10, dev=False)\n\n\n # Write a new pattern to the image, which we'll later\n # verify with dm-crypt\n iotests.log(\"# Write test pattern 0x91\")\n qemu_io_write_pattern(config, 0x91, lowOffsetMB, 10, dev=False)\n iotests.log(\"# Write test pattern 0x5e\")\n qemu_io_write_pattern(config, 0x5e, highOffsetMB, 10, dev=False)\n\n\n # Now we're opening the image with dm-crypt once more\n # and verifying what QEMU wrote, completing the circle\n iotests.log(\"# Open dev\")\n cryptsetup_open(config)\n\n try:\n iotests.log(\"# Read test pattern 0x91\")\n qemu_io_read_pattern(config, 0x91, lowOffsetMB, 10, dev=True)\n iotests.log(\"# Read test pattern 0x5e\")\n qemu_io_read_pattern(config, 0x5e, highOffsetMB, 10, dev=True)\n finally:\n iotests.log(\"# Close dev\")\n cryptsetup_close(config)\n finally:\n iotests.log(\"# Delete image\")\n delete_image(config)\n print", "def run(filename=\"input.json\", path=\".\", **args):\n\n logger = logging.getLogger(__name__)\n \n #read input file (need to add command line specification)\n logger.info(\"Begin processing input file: %s\" % filename)\n eos_dict, thermo_dict, output_file = read_input.extract_calc_data(filename, path, **args)\n eos_dict['jit'] = args['jit']\n\n if output_file:\n file_dict = 
{\"output_file\":output_file}\n else:\n file_dict = {\"output_file\": \"despasito_out.txt\"}\n\n logger.debug(\"EOS dict:\", eos_dict)\n logger.debug(\"Thermo dict:\", thermo_dict)\n logger.info(\"Finish processing input file: {}\".format(filename))\n \n eos = eos_mod(**eos_dict)\n \n # Run either parametrization or thermodynamic calculation\n if \"opt_params\" in list(thermo_dict.keys()):\n logger.info(\"Initializing parametrization procedure\")\n output_dict = fit(eos, thermo_dict)\n #output = fit(eos, thermo_dict)\n logger.info(\"Finished parametrization\")\n write_output.writeout_fit_dict(output_dict,eos,**file_dict)\n else:\n logger.info(\"Initializing thermodynamic calculation\")\n output_dict = thermo(eos, thermo_dict)\n logger.info(\"Finished thermodynamic calculation\")\n write_output.writeout_thermo_dict(output_dict,thermo_dict[\"calculation_type\"],**file_dict)", "def produce_13TeV_template(tag_name=\"HKHI\"):\n num_rebin = 1\n file_name = \"inputs/BkgEstimation_Lin/BkgEstimation_NONE_TOPO_PTDEP_\"+tag_name+\"_Lin.root\"\n print \"Input: \", file_name\n fin = ROOT.TFile.Open(file_name, \"read\")\n h_nom = fin.Get(\"bkg_total_gg_full\").Clone(\"bkg_nominal_old\")\n h_nom.Rebin(num_rebin)\n fout = ROOT.TFile.Open(\"hists_input_\"+tag_name+\".root\", \"recreate\")\n\n h_purity_sys = fin.Get(\"bkg_purity_syst_gg_full\").Clone(\"bkg_purity_syst_gg\")\n h_reducible_sys = fin.Get(\"bkg_reducible_syst_gg_full\").Clone(\"bkg_reducible_syst_gg\")\n h_irreducible_sys = fin.Get(\"bkg_irreducible_syst_gg_full\").Clone(\"bkg_irreducible_syst_gg\")\n h_iso_sys = fin.Get(\"bkg_iso_syst_gg_full\").Clone(\"bkg_iso_syst_gg\")\n\n #file_iso = \"isolation_sys/hist.root\"\n #fin2 = ROOT.TFile.Open(file_iso, \"read\")\n #h_iso_sys = fin2.Get(\"bkg_isolation_syst_gg\")\n ## inflat irreducible uncertainty by factor of 10\n # so that it closes to stats uncertainty in data\n sf = 1\n if INFLATE_SYS:\n sf = 10\n\n # after rebinning systematic uncertainties, need to scale down,\n # otherwise the uncertainties are inflated.\n h_purity_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_irreducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_reducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_iso_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n\n ## truncate the histograms to [200, 2000] GeV\n h_nom_new = truncate_hist(h_nom, \"bkg_nominal\")\n h_purity_sys_new = truncate_hist(h_purity_sys, \"h_purity_sys_new\")\n h_irreducible_sys_new = truncate_hist(h_irreducible_sys, \"h_irreducible_sys_new\")\n h_reducible_sys_new = truncate_hist(h_reducible_sys, \"h_reducible_sys_new\")\n h_iso_sys_new = truncate_hist(h_iso_sys, \"h_iso_sys_new\")\n\n #write down sys and nominal\n fout.cd()\n h_nom_new.Write()\n h_purity_sys_new.Write()\n h_reducible_sys_new.Write()\n h_irreducible_sys_new.Write()\n h_iso_sys_new.Write()\n\n h_purity_up, h_purity_down = create_sys_hist(h_nom_new, h_purity_sys_new, \"purity_sys\")\n h_purity_up.Write()\n h_purity_down.Write()\n\n h_red_up, h_red_down = create_sys_hist(h_nom_new, h_reducible_sys_new, \"reducible_sys\")\n h_red_up.Write()\n h_red_down.Write()\n\n h_irred_up, h_irred_down = create_sys_hist(h_nom_new, h_irreducible_sys_new, \"irreducible_sys\")\n h_irred_up.Write()\n h_irred_down.Write()\n\n h_iso_up, h_iso_down = create_sys_hist(h_nom_new, h_iso_sys, \"isolation_sys\")\n h_iso_up.Write()\n h_iso_down.Write()\n\n fin.Close()\n fout.Close()", "def operate_cipher(self):" ]
[ "0.5198607", "0.5059591", "0.5001696", "0.4993821", "0.49845788", "0.49618042", "0.49262732", "0.4923952", "0.49109218", "0.48413166", "0.4839598", "0.47862753", "0.47585097", "0.47456512", "0.4702502", "0.46877113", "0.46357515", "0.463135", "0.46262878", "0.46126473", "0.45703632", "0.45676038", "0.45507565", "0.45367298", "0.4513278", "0.45083284", "0.4489874", "0.448373", "0.4479839", "0.44775537", "0.44666913", "0.44642898", "0.44539884", "0.44305393", "0.44125995", "0.44086745", "0.44078523", "0.43927428", "0.43869036", "0.43845826", "0.43832815", "0.4382942", "0.43725795", "0.43725795", "0.43657395", "0.43493563", "0.43406796", "0.43405673", "0.43381378", "0.4337805", "0.43344256", "0.43333682", "0.43281063", "0.43270677", "0.43267933", "0.43227357", "0.43131554", "0.43056566", "0.43029025", "0.43026972", "0.42945984", "0.4293986", "0.42829737", "0.42779237", "0.42760056", "0.42755848", "0.42714116", "0.42713022", "0.42691088", "0.42683506", "0.42671442", "0.426514", "0.42638332", "0.42604393", "0.42498752", "0.42477465", "0.42430863", "0.42410323", "0.4236127", "0.4233552", "0.42305258", "0.42263153", "0.42260805", "0.42253664", "0.4215333", "0.42146233", "0.42073604", "0.4204479", "0.42043406", "0.4192855", "0.4183921", "0.41838145", "0.4181138", "0.41780922", "0.41768524", "0.41736326", "0.4167968", "0.41653425", "0.41653305", "0.4165027" ]
0.48125994
11
TDE allows you to perform real-time I/O encryption and decryption on data files. Data is encrypted before it is written to disk and decrypted when it is read from disk into memory. For more information, see [Configure TDE](~~131048~~). > You cannot disable TDE after it is enabled.
def modify_dbinstance_tde( self, request: dds_20151201_models.ModifyDBInstanceTDERequest, ) -> dds_20151201_models.ModifyDBInstanceTDEResponse: runtime = util_models.RuntimeOptions() return self.modify_dbinstance_tdewith_options(request, runtime)
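A minimal usage sketch for modify_dbinstance_tde, assuming an already-initialized client of this SDK class (called `client` here) and that the Tea request model accepts keyword arguments; the placeholder instance ID and the 'enabled' status value are assumptions, while the field names mirror those forwarded by modify_dbinstance_tdewith_options (DBInstanceId, TDEStatus).

request = dds_20151201_models.ModifyDBInstanceTDERequest(
    dbinstance_id='dds-bpxxxxxxxxxxxxx',  # assumed placeholder instance ID
    tdestatus='enabled',                  # assumed value; TDE cannot be disabled once enabled
)
response = client.modify_dbinstance_tde(request)  # returns a ModifyDBInstanceTDEResponse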
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n # file = None\n # for arg in sys.argv:\n # if \".txt\" in arg or \".py\" not in arg or \".log\" not in arg:\n # file = arg\n\n file = input(\"Enter a file: \")\n\n file_data = Cryptography()\n file_data.file = file\n\n crypt_type = input(\"Please enter 'E' to encrypt or 'D' to decrypt\\n>> \")\n file_data.crypt_type = crypt_type\n\n crypt_type = \"encrypt\" if crypt_type == 'E' else \"decrypt\"\n\n file_data.crypt_method = file_data.crypt_method\n\n key = input(\"Please enter a key for your data\\n>> \")\n file_data.key = key\n\n print(f\"crypt_method: {file_data.crypt_method}\")\n new_data = file_data.crypt_methods[file_data.crypt_method]()\n\n crypt_methods = defaultdict(str,\n {'C': \"Caesar\",\n 'M': \"Monoalphabetic\",\n 'P': \"Polyalphabetic\"})\n\n if DEBUG is False:\n crypt_method = crypt_methods[file_data.crypt_method]\n new_file_name = f\"{crypt_method}_{crypt_type.capitalize()}ed.txt\"\n logger.info(f\"{type(new_data)}: {new_data}\")\n Cryptography.write(new_file_name, new_data)\n print(f\"Your new {crypt_type}ed file has been created as \" +\n f\"{new_file_name}.\")", "def save_data(self):\n\n #\n # t=self.t[0:-1:self.R].reshape([self.t[0:-1:self.R].shape[0],1])\n\n def deterministic_data():\n t = self.dt * self.tau\n Ueem1 = self.Xeem[:, 0]\n Ueem2 = self.Xeem[:, 1]\n Ueem3 = self.Xeem[:, 2]\n Uem1 = self.Xem[:, 0]\n Uem2 = self.Xem[:, 1]\n Uem3 = self.Xem[:, 2]\n Ustk1 = self.Xstkm[:, 0]\n Ustk2 = self.Xstkm[:, 1]\n Ustk3 = self.Xstkm[:, 2]\n tagPar = np.array([\n 'k = ',\n 'r = ',\n 'T0 = ',\n 'N = ',\n 'R = ',\n 'T = ',\n 'dt = ',\n 'Dt = ',\n 'L = ',\n 'gamma = ',\n 'eta = ',\n 'lambda =',\n 'delta = ',\n 'beta = ',\n 'a = ',\n 'N0 = ',\n 'u = ',\n 'sigma1 = ',\n 'sigma2 = ',\n 'x01 = ',\n 'x02 = ',\n 'x03 = ',\n ])\n ParValues = np.array([\n self.k,\n self.r,\n self.T0,\n self.N,\n self.R,\n self.T,\n self.dt,\n self.Dt,\n self.L,\n self.gamma,\n self.eta,\n self.Lambda,\n self.delta,\n self.beta,\n self.a,\n self.NN,\n self.u,\n self.sigma1,\n self.sigma2,\n self.Xzero[0, 0],\n self.Xzero[0, 1],\n self.Xzero[0, 2]\n ])\n strPrefix = str(self.Dt)\n name1 = 'DetParameters' + strPrefix + '.txt'\n name2 = 'DetSolution' + strPrefix + '.txt'\n name3 = 'DetRefSolution' + str(self.dt) + '.txt'\n\n PARAMETERS = np.column_stack((tagPar, ParValues))\n np.savetxt(name1, PARAMETERS, delimiter=\" \", fmt=\"%s\")\n np.savetxt(name2,\n np.transpose(\n (\n t, Uem1, Uem2, Uem3, Ustk1, Ustk2, Ustk3,\n )\n ), fmt='%1.8f', delimiter='\\t')\n np.savetxt(name3,\n np.transpose(\n (\n self.t, Ueem1, Ueem2, Ueem3,\n )\n ), fmt='%1.8f', delimiter='\\t')\n\n def stochastic_data():\n \"\"\"\n t = self.dt * self.tau\n Ueem1 = self.Xeem[:, 0]\n Ueem2 = self.Xeem[:, 1]\n Ueem3 = self.Xeem[:, 2]\n Uem1 = self.Xem[:, 0]\n Uem2 = self.Xem[:, 1]\n Uem3 = self.Xem[:, 2]\n Ustk1 = self.Xstkm[:, 0]\n Ustk2 = self.Xstkm[:, 1]\n Ustk3 = self.Xstkm[:, 2]\n Utem1 = self.Xtem[:, 0]\n Utem2 = self.Xtem[:, 1]\n Utem3 = self.Xtem[:, 2]\n \"\"\"\n\n tagPar = np.array([\n 'k = ',\n 'r = ',\n 'T0 = ',\n 'N = ',\n 'R = ',\n 'T = ',\n 'dt = ',\n 'Dt = ',\n 'L = ',\n 'gamma = ',\n 'eta = ',\n 'lambda =',\n 'delta = ',\n 'beta = ',\n 'a = ',\n 'N0 = ',\n 'u = ',\n 'sigma1 = ',\n 'sigma2 = ',\n 'x01 = ',\n 'x02 = ',\n 'x03 = ',\n ])\n ParValues = np.array([\n self.k,\n self.r,\n self.T0,\n self.N,\n self.R,\n self.T,\n self.dt,\n self.Dt,\n self.L,\n self.gamma,\n self.eta,\n self.Lambda,\n self.delta,\n self.beta,\n self.a,\n self.NN,\n self.u,\n self.sigma1,\n self.sigma2,\n self.Xzero[0, 0],\n 
self.Xzero[0, 1],\n self.Xzero[0, 2]\n ])\n strPrefix = str(self.Dt)\n name1 = 'StoParameters' + strPrefix + '.txt'\n '''\n name2 = 'StoSolution' + strPrefix + '.txt'\n name3 = 'StoRefSolution' + str(self.dt) + '.txt'\n '''\n PARAMETERS = np.column_stack((tagPar, ParValues))\n np.savetxt(name1, PARAMETERS, delimiter=\" \", fmt=\"%s\")\n '''\n np.save(name2,\n np.transpose(\n (\n t, Uem1, Uem2, Uem3, Ustk1, Ustk2, Ustk3, Utem1,\n Utem2, Utem3\n )\n ))\n np.savetxt(name3,\n np.transpose(\n (\n self.t, Ueem1, Ueem2, Ueem3\n )\n ))\n if self.sigma1 == 0.0:\n if self.sigma2 == 0.0:\n DeterministicData()\n return\n StochasticData()\n '''\n\n return", "def setup_tdd(self,earfcn,bwMhz,powerdBm,ud_config,special_sf_config=0,ul_timing_advance=0,with_rx=False):\r\r\n\r\r\n self.setup_modem()\r\r\n self.instr.setup_4g_tx_test(cable_loss_dB=self.testConfig.cable_loss)\r\r\n self.teststep_idx = 0\r\r\n band,freq_ul,freq_dl = lte_util.get_lte_ul_dl_freq_band(earfcn)\r\r\n\r\r\n self.set_band(band=band)\r\r\n self.modemObj.set_rat_band(rat='LTE', band=band)\r\r\n duplex_mode = self.get_duplex_mode()\r\r\n assert(duplex_mode == \"TDD\")\r\r\n self.instr.lte_tx.set_duplex_mode(duplex_mode=duplex_mode)\r\r\n self.instr.lte_tx.set_band(band=band)\r\r\n self.modemObj.set_freqMHz(freqMHz=freq_ul)\r\r\n self.instr.lte_tx.set_rf_freqMHz(freqMHz=freq_ul)\r\r\n self.set_bw(bwMHz=bwMhz)\r\r\n rf_config = LTE_rf_config(bwMHz=bwMhz)\r\r\n self.modemObj.set_rb(direction='ul', num_rb=rf_config.num_rbs)\r\r\n self.modemObj.set_rb(direction='dl', num_rb=rf_config.num_rbs)\r\r\n self.modemObj.set_rb_start(rb_offset=rf_config.rb_offset)\r\r\n self.modemObj.set_rb_len(rb_len=rf_config.rb_len)\r\r\n rf_config.check_config()\r\r\n self.instr.lte_tx.set_channel_bw_MHz(bwMHz=bwMhz)\r\r\n self.modemObj.send_ul_pattern()\r\r\n\r\r\n self.set_ud_config(ud_config)\r\r\n self.modemObj.set_ud_config(ud_config)\r\r\n self.instr.lte_tx.set_ul_dl_conf(ud_config)\r\r\n\r\r\n self.modemObj.enable_tx()\r\r\n\r\r\n bursted = not (ud_config==\"TEST0\" or ud_config==\"TEST1\")\r\r\n self.setup_tdd_trigger(bursted,special_sf_config)\r\r\n\r\r\n self.modemObj.set_special_sf_config(special_sf_config)\r\r\n self.instr.lte_tx.set_special_subframe_conf(special_sf_config)\r\r\n\r\r\n self.modemObj.set_ul_timing_advance(ul_timing_advance)\r\r\n\r\r\n meas_sf = 2\r\r\n self.set_meas_sf(meas_sf)\r\r\n self.instr.lte_tx.conf_measurement_subframe(measSubframe=meas_sf)\r\r\n\r\r\n self.modemObj.set_txagc_dbm(value=powerdBm)\r\r\n self.instr.lte_tx.set_rf_exp_power(power_dBm=powerdBm+5)\r\r\n self.instr.waitForCompletion()\r\r\n\r\r\n if with_rx:\r\r\n assert(bursted)\r\r\n self.modemObj.set_freqMHz(direction='rx',freqMHz=freq_dl)\r\r\n self.modemObj.set_rxagc_auto(ant='m')\r\r\n self.modemObj.enable_rx(ant='m')\r\r\n\r\r\n self.set_test_afc_val()\r\r\n\r\r\n return bursted", "def main():\n\n # performs crib dragging using initial values\n plaintext1, plaintext2 = crib_drag('', '', 0, 0)\n\n if plaintext1 is None or plaintext2 is None:\n print('No possible English decryption using the current dictionary')\n return\n\n # find the key and creates file with results\n plaintext1 = plaintext1[:CIPHER_LEN]\n plaintext2 = plaintext2[:CIPHER_LEN]\n key = find_key(plaintext1, plaintext2)\n\n with open('plaintext1.txt', 'w') as plain_file:\n plain_file.write(plaintext1)\n with open('plaintext2.txt', 'w') as plain_file:\n plain_file.write(plaintext2)\n with open('key.txt', 'wb') as plain_file:\n plain_file.write(key)", "def TestTDDFT():\n prm = '''\n Model\tTDHF\n 
Method\tMMUT\n dt\t0.02\n MaxIter\t100\n ExDir\t1.0\n EyDir\t1.0\n EzDir\t1.0\n FieldAmplitude\t0.01\n FieldFreq\t0.9202\n ApplyImpulse\t1\n ApplyCw\t\t0\n StatusEvery\t10\n '''\n geom = \"\"\"\n H 0. 0. 0.\n H 0. 0. 0.9\n H 2.0 0. 0\n H 2.0 0.9 0\n \"\"\"\n output = re.sub(\"py\",\"dat\",sys.argv[0])\n mol = gto.Mole()\n mol.atom = geom\n mol.basis = 'sto-3g'\n mol.build()\n the_scf = pyscf.dft.RKS(mol)\n the_scf.xc='HF'\n print \"Inital SCF finished. E=\", the_scf.kernel()\n aprop = tdscf.tdscf(the_scf,prm,output)\n return", "def modify_dbinstance_tdewith_options(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.encryption_key):\n query['EncryptionKey'] = request.encryption_key\n if not UtilClient.is_unset(request.encryptor_name):\n query['EncryptorName'] = request.encryptor_name\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_arn):\n query['RoleARN'] = request.role_arn\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.tdestatus):\n query['TDEStatus'] = request.tdestatus\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ModifyDBInstanceTDE',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ModifyDBInstanceTDEResponse(),\n self.call_api(params, req, runtime)\n )", "def test_single_dft():\n test_file = os.path.join(DATA_DIR, 'test39_dft.out')\n parser = CRYSTOUT(test_file)\n info = parser.info\n assert info['finished'] == 2 # finished without errors\n assert info['energy'] == -4.8538264773648E+02 * Ha # energy in eV\n assert info['k'] == '6x6x6' # Monkhorst-Pack net\n assert info['H'] == \"LDA/PZ_LDA\"\n assert info['ncycles'][0] == 9\n assert info['electrons']['basis_set']['ecp']['Ge'][0][1] == (0.82751, -1.26859, -1)\n assert info['electrons']['basis_set']['bs']['Ge'][0][1] == (1.834, 0.4939, 0.006414)", "def enable_dual_TTS(self):\n\t\t\n\t\tself.isActiveDualTTS = True\n\t\tself.change_TTS_engine()", "def _disable_encryption(self):\n # () -> None\n self.encrypt = self._disabled_encrypt\n self.decrypt = self._disabled_decrypt", "def tdd():\n\n with lcd(FRONTENDDIR):\n cmd = '%(gulp)s tdd' % {'gulp': get_gulp()}\n local(cmd)", "def crypto_test(tdesc, tpm):\n node_name = tdesc.get('name')\n key = get_attribute(tdesc, 'key')\n if len(key) not in (16, 24, 32):\n raise subcmd.TpmTestError('wrong key size \"%s:%s\"' % (\n node_name,\n ''.join('%2.2x' % ord(x) for x in key)))\n iv = get_attribute(tdesc, 'iv', required=False)\n if iv and len(iv) != 16:\n raise subcmd.TpmTestError('wrong iv size \"%s:%s\"' % (\n node_name,\n ''.join('%2.2x' % ord(x) for x in iv)))\n 
clear_text = get_attribute(tdesc, 'clear_text')\n if tpm.debug_enabled():\n print('clear text size', len(clear_text))\n cipher_text = get_attribute(tdesc, 'cipher_text', required=False)\n real_cipher_text = crypto_run(node_name, ENCRYPT, key, iv,\n clear_text, cipher_text, tpm)\n crypto_run(node_name, DECRYPT, key, iv, real_cipher_text,\n clear_text, tpm)\n print(utils.cursor_back() + 'SUCCESS: %s' % node_name)", "async def modify_dbinstance_tdewith_options_async(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.encryption_key):\n query['EncryptionKey'] = request.encryption_key\n if not UtilClient.is_unset(request.encryptor_name):\n query['EncryptorName'] = request.encryptor_name\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_arn):\n query['RoleARN'] = request.role_arn\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.tdestatus):\n query['TDEStatus'] = request.tdestatus\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ModifyDBInstanceTDE',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ModifyDBInstanceTDEResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def disable_dual_TTS(self):\n\t\t\n\t\tself.isActiveDualTTS = False\n\t\tself.change_TTS_engine()", "def main(ctx, access_key, host, debug):\n info = {\n \"access_key\": access_key,\n \"host\": host,\n \"DEBUG\": debug\n }\n\n _tda = None\n\n if access_key == \"\":\n configFile = _config_filepath()\n if os.path.exists(configFile):\n with open(configFile, \"r\", encoding=\"utf-8\") as cf:\n if cf.read() != \"\":\n info = _getConf()\n info[\"DEBUG\"] = debug\n\n if info[\"access_key\"] != \"\":\n _tda = TDA(info[\"access_key\"], info[\"host\"])\n if info[\"DEBUG\"]:\n _tda.Debug()\n\n ctx.obj = _tda", "def _get_sql_db_tde_disabled_event(com, ext):\n friendly_cloud_type = util.friendly_string(com.get('cloud_type'))\n reference = com.get('reference')\n description = (\n '{} SQL DB {} has TDE disabled.'\n .format(friendly_cloud_type, reference)\n )\n recommendation = (\n 'Check {} SQL DB {} and enable TDE.'\n .format(friendly_cloud_type, reference)\n )\n event_record = {\n # Preserve the extended properties from the virtual\n # machine record because they provide useful context to\n # locate the virtual machine that led to the event.\n 'ext': util.merge_dicts(ext, {\n 'record_type': 'sql_db_tde_event'\n }),\n 'com': {\n 'cloud_type': com.get('cloud_type'),\n 'record_type': 'sql_db_tde_event',\n 'reference': reference,\n 'description': description,\n 'recommendation': recommendation,\n }\n 
}\n\n _log.info('Generating sql_db_tde_event; %r', event_record)\n yield event_record", "def encrypt():\n print(\"Use sops to encrypt the file.\")\n print(\"Learn more at https://github.com/mozilla/sops\")", "def test_tte4(self):\n filename = str(self.temp_j2k_filename)\n xtx4_setup(filename)\n self.assertTrue(True)", "def test_endecrypt():\n\n e, d, c = keygen()\n\n test_encryption(e, c)\n test_decryption(d, c)\n key_cracker(e, c)", "def test_pert_file(self):\n path, case = os.path.split(self.ieee14)\n\n # --- with pert file ---\n ss = andes.run('ieee14.raw', pert='pert.py',\n input_path=path, no_output=True, default_config=True,\n )\n ss.TDS.init()\n self.assertIsNotNone(ss.TDS.callpert)\n\n # --- without pert file ---\n ss = andes.run('ieee14.raw',\n input_path=path, no_output=True, default_config=True,\n )\n ss.TDS.init()\n self.assertIsNone(ss.TDS.callpert)", "def decrypt_text_file(self):\r\n\t\t#Ensures that the file has something that can be decrypted.\r\n\t\tfile_contains_message = True\r\n\t\twhile file_contains_message:\r\n\t\t\tfile_exists = True\r\n\t\t\t#Checks to see if the file exists.\r\n\t\t\twhile file_exists:\r\n\t\t\t\tself.text_file_name = input(\"Please enter the name of the text file you wish to decrypt in format |file_name.txt|.--> \")\r\n\t\t\t\tif \".txt\" in self.text_file_name:\r\n\t\t\t\t\tfile_exists = Doc_Control().check_for_file(self.text_file_name)\r\n\t\t\t\telse: \r\n\t\t\t\t\tcontinue\r\n\t\t\t#Decrypts message but verifys correct key before giving user their decrypted message.\r\n\t\t\twhile True: \r\n\t\t\t\tself.message = Doc_Control().open_file(self.text_file_name)\r\n\t\t\t\tif self.message != \"\" and len(self.message) > 4:\r\n\t\t\t\t\tfile_contains_message = False\r\n\t\t\t\t\tbreak\r\n\t\t\t\telse:\r\n\t\t\t\t\tprint(\"Your file does not contain an encryptable message.\")\r\n\t\t\t\t\tbreak\r\n\t\tself.right_key = True\r\n\t\twhile self.right_key:\r\n\t\t\tself.setup_key_decrypt()\r\n\t\t\tself.my_code = Decryptor(self.message, self.key).transfer_decrypt()\r\n\t\t\tself.verify_decrypt_key()\r\n\t\tself.output_file = Doc_Control().assign_output_file()\r\n\t\toutput_file_obj = open(self.output_file, 'w')\r\n\t\toutput_file_obj.write(self.my_code)\r\n\t\toutput_file_obj.close()\t\t\r\n\t\tprint(\"\\nYour file has been decrypted.\")", "def decrypt(self,timerPrinting=False):\n\n\t\tt = time.time() \n\t\tif self.extension in self.path:\n\t\t\twith open(self.path,'rb') as infile:\n\t\t\t\tfile_data = infile.read()\n\t\t\t# #Start Checking the Platform\n\t\t\t# if platform.system() == 'Windows':\n\t\t\t# \tself.path = self.path.split('\\\\')[-1]\n\t\t\t# elif platform.system() == 'Linux':\n\t\t\t# \tself.path = self.path.split('/')[-1]\n\t\t\t# # END Checking\n\t\t\t# print('Decryption of '+ self.path +\"...\")\n\t\t\t######################### Blowfish Decryption Algorithm ###############\n\t\t\tbs = Blowfish.block_size\n\t\t\trealData = base64.b64decode(file_data)[8:]\n\t\t\tiv = base64.b64decode(file_data)[:8]\n\t\t\tdecrypt = Blowfish.new(self.key, Blowfish.MODE_CBC, iv)\n\t\t\tself.decrypt = decrypt.decrypt(realData)\n\t\t\t########################### End Blowfish #########################\n\t\t\t#print('Writing in your file...')\n\t\t\tself.out = self.path.replace(self.extension,'')\n\t\t\tos.remove(self.path)\n\t\t\twith open(self.out,'wb') as outfile:\n\t\t\t\toutfile.write(self.decrypt)\n\t\t\tif timerPrinting:\n\t\t\t\tprint(\"Done in \",time.time() - t)\n\t\t\t\n\t\telse:\n\t\t\tprint('The File is Not Encrypted To Decrypted.')", "def 
get_TTS_data(self, exchanged_token, exchange=False):\n if os.path.exists(self.lock_file):\n ctime = os.stat(self.lock_file).st_ctime\n age = time.time() - ctime\n if age < self.age:\n self.log.error(\"Update already in progres. Sleeping ..\")\n time.sleep(self.age - age)\n else:\n self.log.error(\"Stale lock file, removing ...\")\n os.remove(self.lock_file)\n open(self.lock_file, 'w+').close()\n\n if exchange:\n with file('/tmp/refresh_token') as f:\n refresh_token = f.read()\n self.exchanged_token = self.refresh_token(self.client_id, self.client_secret, refresh_token.strip())\n if isinstance(self.exchanged_token, int):\n self.log.error(\"refresh_token error\")\n\n if self.get_certificate(self.credential_endpoint):\n # load json and prepare objects\n with open('/tmp/output.json') as tts_data_file:\n tts_data = json.load(tts_data_file)\n \n f = open(self.user_cert, 'w+')\n f.write(str(tts_data['credential']['entries'][0]['value']))\n f.close()\n \n f = open(self.user_key, 'w+')\n f.write(str(tts_data['credential']['entries'][1]['value']))\n f.close()\n \n f = open(self.user_passwd, 'w+')\n f.write(str(tts_data['credential']['entries'][2]['value']))\n f.close()\n \n try:\n os.chmod(self.user_key, 0600)\n except OSError, e:\n self.log.error(e)\n self.log.error(\"Permission denied to chmod passwd file\")\n return False\n \n os.remove(self.lock_file)\n \n return True\n else:\n return False", "def read_taiwan_ntu_dsi():\n dipy_home = pjoin(os.path.expanduser('~'), '.dipy')\n folder = pjoin(dipy_home, 'taiwan_ntu_dsi')\n fraw = pjoin(folder, 'DSI203.nii.gz')\n fbval = pjoin(folder, 'DSI203.bval')\n fbvec = pjoin(folder, 'DSI203.bvec')\n md5_dict = {'data': '950408c0980a7154cb188666a885a91f',\n 'bval': '602e5cb5fad2e7163e8025011d8a6755',\n 'bvec': 'a95eb1be44748c20214dc7aa654f9e6b',\n 'license': '7fa1d5e272533e832cc7453eeba23f44'}\n\n check_md5(fraw, md5_dict['data'])\n check_md5(fbval, md5_dict['bval'])\n check_md5(fbvec, md5_dict['bvec'])\n check_md5(pjoin(folder, 'DSI203_license.txt'), md5_dict['license'])\n\n bvals, bvecs = read_bvals_bvecs(fbval, fbvec)\n bvecs[1:] = bvecs[1:] / np.sqrt(np.sum(bvecs[1:] * bvecs[1:], axis=1))[:, None]\n\n gtab = gradient_table(bvals, bvecs)\n img = nib.load(fraw)\n return img, gtab", "def decrypt(data):\n # Decrypt data if necessary\n result = None\n if str(data[:5]) == \"<?xml\":\n print(\" - Unprotected CETRAINER detected\")\n result = data\n else:\n print(\" - Protected CETRAINER detected. 
Decrypting...\")\n ckey = 0xCE\n for i in range(2, len(data)):\n data[i] = data[i] ^ data[i-2]\n for i in range(len(data)-2, -1, -1):\n data[i] = data[i] ^ data[i+1]\n for i in range(0, len(data)):\n data[i] = data[i] ^ ckey\n ckey = (ckey + 1) & 0xFF\n\n # Decompress if necessary and write data\n if data[:5] == b'CHEAT':\n result = zlib.decompress(data[5:], -15)\n result = result[4:]\n print(\" - Decompressed CETRAINER using new method\")\n else:\n result = zlib.decompress(data, -15)\n print(\" - Decompressed CETRAINER using old method\")\n return result", "def decrypt(path, key, default, output, url, token, vaultpath):\n if not key:\n key = getpass('Encryption key: ')\n\n path, file_type, file_mtime = get_file_type_and_mtime(path)\n data = get_config(path, file_type, default=False)\n data = decrypt_credentials(data, key)\n\n # Only merge the DEFAULT section after decrypting.\n if default:\n data = merge_default(data)\n\n if url:\n try:\n import hvac\n except:\n print('''\nTo use Hashicorp's Vault you must install the hvac package.\nTo install it try using the following command:\n\n pip install hvac\n''')\n exit(3)\n\n if not token:\n token = os.environ.get('VAULT_TOKEN', '')\n if not token:\n token = getpass('Vault token: ')\n \n client = hvac.Client(url=url, token=token)\n if not vaultpath:\n vaultpath = path\n\n if vaultpath[0] == '~':\n vaultpath = vaultpath[1:]\n if vaultpath[0] == '.':\n vaultpath = vaultpath[1:]\n if vaultpath[0] == '.':\n vaultpath = vaultpath[1:]\n if vaultpath[0] == '/':\n vaultpath = vaultpath[1:]\n\n data = merge_default(data)\n for heading in data:\n # kargs = { heading: json.dumps(data[heading]) }\n client.write(vaultpath + '/' + heading, **data[heading])\n\n else:\n\n if output:\n if output[0] == '.':\n output = output[1:]\n file_type = '.' 
+ output.lower()\n\n with open(path + file_type, 'w') as save_file:\n if file_type == '.json':\n json.dump(data, save_file, indent=2)\n\n elif file_type in {'.ini', '.conf'}:\n if default:\n default_section = 'DEFAULT'\n else:\n default_section = 'DEFAULT' + os.urandom(16).hex()\n config_ini = configparser.ConfigParser(\n dict_type=OrderedDict,\n default_section=default_section,\n interpolation=None)\n for heading in data:\n config_ini.add_section(heading)\n for item in data[heading]:\n config_ini.set(heading, item, data[heading][item])\n config_ini.write(save_file)\n\n else:\n write_yaml(save_file, data)", "def showTF(tf,outDir):\n\n nlo2lo,data2lo,data2nlo,data2lo_A,data2nlo_A=tf\n\n c=ROOT.TCanvas('c','c',500,500)\n c.SetBottomMargin(0)\n c.SetTopMargin(0)\n c.SetLeftMargin(0)\n c.SetRightMargin(0)\n c.cd()\n\n p1=ROOT.TPad('p1','p1',0,0.5,1,1.0)\n p1.Draw()\n p1.SetRightMargin(0.03)\n p1.SetLeftMargin(0.12)\n p1.SetTopMargin(0.1)\n p1.SetBottomMargin(0.01)\n p1.SetGridy()\n p1.cd()\n nlo2lo.Draw('e2')\n nlo2lo.GetYaxis().SetTitle('Z ratio')\n nlo2lo.GetYaxis().SetNdivisions(5)\n nlo2lo.GetXaxis().SetTitleSize(0)\n nlo2lo.GetXaxis().SetLabelSize(0)\n nlo2lo.GetYaxis().SetTitleSize(0.08)\n nlo2lo.GetYaxis().SetTitleOffset(0.8)\n nlo2lo.GetYaxis().SetLabelSize(0.08)\n nlo2lo.GetYaxis().SetRangeUser(0.01,1.94)\n data2lo.Draw('e1same')\n data2nlo.Draw('e1same')\n\n leg1=p1.BuildLegend(0.7,0.88,0.95,0.66)\n leg1.SetFillStyle(0)\n leg1.SetBorderSize(0)\n leg1.SetTextFont(42)\n leg1.SetTextSize(0.06)\n\n l1=ROOT.TLine()\n l1.SetLineWidth(2)\n l1.SetLineColor(ROOT.kBlue)\n l1.DrawLine(data2lo.GetXaxis().GetXmin(),1,data2lo.GetXaxis().GetXmax(),1)\n\n txt=ROOT.TLatex()\n txt.SetNDC(True)\n txt.SetTextFont(42)\n txt.SetTextSize(0.08)\n txt.SetTextAlign(12)\n txt.DrawLatex(0.12,0.95,'#bf{CMS} #it{preliminary}')\n p1.RedrawAxis()\n\n c.cd()\n p2=ROOT.TPad('p2','p2',0,0,1,0.5)\n p2.SetRightMargin(0.03)\n p2.SetLeftMargin(0.12)\n p2.SetTopMargin(0.01)\n p2.SetBottomMargin(0.18)\n p2.SetGridy()\n p2.Draw()\n p2.cd()\n data2lo_A.Draw('e1')\n data2lo_A.GetYaxis().SetTitle('#gamma ratio')\n data2lo_A.GetYaxis().SetNdivisions(5)\n data2lo_A.GetYaxis().SetRangeUser(0.01,1.94)\n data2lo_A.GetXaxis().SetTitleSize(0.08)\n data2lo_A.GetXaxis().SetLabelSize(0.08)\n data2lo_A.GetYaxis().SetTitleSize(0.08)\n data2lo_A.GetYaxis().SetLabelSize(0.08)\n data2lo_A.GetYaxis().SetTitleOffset(0.8)\n data2nlo_A.Draw('e1same')\n \n leg2=p2.BuildLegend(0.7,0.94,0.95,0.80)\n leg2.SetFillStyle(0)\n leg2.SetBorderSize(0)\n leg2.SetTextFont(42)\n leg2.SetTextSize(0.06)\n \n l2=ROOT.TLine()\n l2.SetLineColor(ROOT.kBlue)\n l2.SetLineWidth(2)\n l2.DrawLine(data2lo_A.GetXaxis().GetXmin(),1,data2lo_A.GetXaxis().GetXmax(),1)\n\n p2.RedrawAxis()\n\n c.cd()\n c.Modified()\n c.Update()\n for ext in ['png','pdf']:\n c.SaveAs('{0}.{1}'.format(outDir,ext))", "def enable_tee(self):\n self._tee = True", "def test_simulation_persistence(compression, tmp_path):\n path = tmp_path / \"test_simulation_persistence.hdf5\"\n storage = FileStorage(path, compression=compression)\n\n # write some simulation data\n pde = DiffusionPDE()\n grid = UnitGrid([16, 16]) # generate grid\n state = ScalarField.random_uniform(grid, 0.2, 0.3)\n pde.solve(state, t_range=0.11, dt=0.001, tracker=storage.tracker(interval=0.05))\n storage.close()\n\n # read the data\n storage = FileStorage(path)\n np.testing.assert_almost_equal(storage.times, [0, 0.05, 0.1])\n data = np.array(storage.data)\n assert data.shape == (3,) + state.data.shape\n grid_res = storage.grid\n assert 
grid == grid_res\n grid_res = storage.grid\n assert grid == grid_res", "def louder():\n try:\n ttsEng.louder()\n except Exception, e:\n logging.error(e)", "async def modify_dbinstance_tde_async(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n runtime = util_models.RuntimeOptions()\n return await self.modify_dbinstance_tdewith_options_async(request, runtime)", "def write_tde(table_df, tde_fullpath, arg_append):\n if arg_append and not os.path.isfile(tde_fullpath):\n print \"Couldn't append -- file doesn't exist\"\n arg_append = False\n\n # Remove it if already exists\n if not arg_append and os.path.exists(tde_fullpath):\n os.remove(tde_fullpath)\n tdefile = tde.Extract(tde_fullpath)\n\n # define the table definition\n table_def = tde.TableDefinition()\n \n # create a list of column names\n colnames = table_df.columns\n # create a list of column types\n coltypes = table_df.dtypes\n\n # for each column, add the appropriate info the Table Definition\n for col_idx in range(0, len(colnames)):\n cname = colnames[col_idx]\n ctype = fieldMap[str(coltypes[col_idx])]\n table_def.addColumn(cname, ctype) \n\n # create the extract from the Table Definition\n if arg_append:\n tde_table = tdefile.openTable('Extract')\n else:\n tde_table = tdefile.addTable('Extract', table_def)\n row = tde.Row(table_def)\n\n for r in range(0, table_df.shape[0]):\n for c in range(0, len(coltypes)):\n if str(coltypes[c]) == 'float64':\n row.setDouble(c, table_df.iloc[r,c])\n elif str(coltypes[c]) == 'float32':\n row.setDouble(c, table_df.iloc[r,c])\n elif str(coltypes[c]) == 'int64':\n row.setDouble(c, table_df.iloc[r,c]) \n elif str(coltypes[c]) == 'int32':\n row.setDouble(c, table_df.iloc[r,c])\n elif str(coltypes[c]) == 'object':\n row.setString(c, table_df.iloc[r,c]) \n elif str(coltypes[c]) == 'bool':\n row.setBoolean(c, table_df.iloc[r,c])\n else:\n row.setNull(c)\n # insert the row\n tde_table.insert(row)\n\n tdefile.close()\n print \"Wrote %d lines to %s\" % (len(table_df), tde_fullpath)", "def decrypt(self, in_, out):\n try:\n # Bytes read from in will be decrypted\n \n out.write(pyDes.des.decrypt(in_.read()))\n # Read in the decrypted bytes and write the cleartext to out\n out.close()\n except Exception as e:\n print e\n pass", "def tesselate(options):\n if not options.freplace:\n if len(options.args) != 2:\n raise TelemacException(\\\n '\\nThe code \"tessellate\" here '\n 'requires one i2s/i3s file and '\n 'one output slf file\\n')\n i3s_file = options.args[0]\n out_file = options.args[1]\n else:\n if len(options.args) != 1:\n raise TelemacException(\\\n '\\nThe code \"tessellate\" here '\n 'requires one i2s/i3s file\\n')\n i3s_file = options.args[0]\n head, _ = path.splitext(i3s_file)\n out_file = head+'.slf'\n\n i3s_file = path.realpath(i3s_file)\n if not path.exists(i3s_file):\n raise TelemacException(\\\n '\\nCould not find '\n 'the file named: {}'.format(i3s_file))\n\n print('\\n\\nTessellating ' + path.basename(i3s_file) + ' within ' + \\\n path.dirname(i3s_file) + '\\n'+'~'*72+'\\n')\n i2s = InS(i3s_file)\n ikle2, ipob2, meshx, meshy = tessellate_poly(i2s, debug=True)\n\n print('\\n\\nWriting down the Selafin file ' + \\\n path.basename(out_file) + '\\n'+'~'*72+'\\n')\n slf = Selafin('')\n slf.title = ''\n slf.nplan = 1\n slf.ndp2 = 3\n slf.ndp3 = 3\n slf.nbv1 = 1\n slf.nvar = 1\n slf.varindex = 1\n slf.varnames = ['BOTTOM ']\n slf.varunits = ['M ']\n slf.ikle2 = ikle2\n slf.ikle3 = slf.ikle2\n slf.meshx = meshx\n slf.meshy = 
meshy\n slf.npoin2 = i2s.npoin\n slf.npoin3 = slf.npoin2\n slf.nelem2 = len(slf.ikle2)/slf.ndp3\n slf.nelem3 = slf.nelem2\n slf.iparam = [0, 0, 0, 0, 0, 0, 1, 0, 0, 0]\n slf.ipob2 = ipob2\n slf.ipob3 = slf.ipob2\n slf.fole = {'hook':open(out_file, 'wb'), 'endian':\">\",\n 'float':('f', 4), 'name':out_file}\n slf.tags['times'] = [1]\n if options.sph2ll != None:\n radius = 6371000.\n long0, lat0 = options.sph2ll.split(\":\")\n long0 = np.deg2rad(float(long0))\n lat0 = np.deg2rad(float(lat0))\n const = np.tan(lat0/2. + np.pi/4.)\n slf.meshx = np.rad2deg(slf.meshx/radius + long0)\n slf.meshy = np.rad2deg(2.*np.arctan(const*np.exp(slf.meshy/radius)) \\\n - np.pi/2.)\n if options.ll2sph != None:\n radius = 6371000.\n long0, lat0 = options.ll2sph.split(\":\")\n long0 = np.deg2rad(float(long0))\n lat0 = np.deg2rad(float(lat0))\n slf.meshx = radius * (np.deg2rad(slf.meshx) - long0)\n slf.meshy = radius * \\\n (np.log(np.tan(np.deg2rad(slf.meshy)/2. + np.pi/4.)) \\\n - np.log(np.tan(lat0/2. + np.pi/4.)))\n if options.ll2utm != None:\n zone = int(options.ll2utm)\n slf.meshx, slf.meshy, zone = utm.from_lat_long(slf.meshx, slf.meshy,\n zone)\n if options.utm2ll != None:\n zone = int(options.utm2ll)\n slf.meshx, slf.meshy = utm.to_lat_long(slf.meshx, slf.meshy, zone)\n slf.append_header_slf()\n slf.append_core_time_slf(0)\n slf.append_core_vars_slf([np.zeros(slf.npoin2)])\n slf.fole['hook'].close()", "def decrypt(self, data):", "def createTripleDES(key, IV, implList=None):\r\n if implList == None:\r\n implList = [\"openssl\", \"pycrypto\"]\r\n\r\n for impl in implList:\r\n if impl == \"openssl\" and cryptomath.m2cryptoLoaded:\r\n return openssl_tripledes.new(key, 2, IV)\r\n elif impl == \"pycrypto\" and cryptomath.pycryptoLoaded:\r\n return pycrypto_tripledes.new(key, 2, IV)\r\n raise NotImplementedError()", "def _disabled_decrypt(self, *args, **kwargs):\n raise NotImplementedError('\"decrypt\" is not supported by the \"{}\" algorithm'.format(self.java_name))", "def crypto_run(node_name, op_type, key, iv, in_text, out_text, tpm):\n mode_name, submode_name = node_name.split(':')\n submode_name = submode_name[:3].upper()\n\n mode = SUPPORTED_MODES.get(mode_name.upper())\n if not mode:\n raise subcmd.TpmTestError('unrecognizable mode in node \"%s\"' % node_name)\n\n submode = mode.submodes.get(submode_name, 0)\n cmd = '%c' % op_type # Encrypt or decrypt\n cmd += '%c' % submode # A particular type of a generic algorithm.\n cmd += '%c' % len(key)\n cmd += key\n cmd += '%c' % len(iv)\n if iv:\n cmd += iv\n cmd += struct.pack('>H', len(in_text))\n cmd += in_text\n if tpm.debug_enabled():\n print('%d:%d cmd size' % (op_type, mode.subcmd),\n len(cmd), utils.hex_dump(cmd))\n wrapped_response = tpm.command(tpm.wrap_ext_command(mode.subcmd, cmd))\n real_out_text = tpm.unwrap_ext_response(mode.subcmd, wrapped_response)\n if out_text:\n if len(real_out_text) > len(out_text):\n real_out_text = real_out_text[:len(out_text)] # Ignore padding\n if real_out_text != out_text:\n if tpm.debug_enabled():\n print('Out text mismatch in node %s:\\n' % node_name)\n else:\n raise subcmd.TpmTestError(\n 'Out text mismatch in node %s, operation %d:\\n'\n 'In text:%sExpected out text:%sReal out text:%s' % (\n node_name, op_type,\n utils.hex_dump(in_text),\n utils.hex_dump(out_text),\n utils.hex_dump(real_out_text)))\n return real_out_text", "def test(DATASET=\"Texas\", CONFIG=None):\n if CONFIG is None:\n CONFIG = get_config_kACE(DATASET)\n print(f\"Loading {DATASET} data\")\n x_im, y_im, EVALUATE, (C_X, C_Y) = 
datasets.fetch(DATASET, **CONFIG)\n if tf.config.list_physical_devices(\"GPU\") and not CONFIG[\"debug\"]:\n C_CODE = 3\n print(\"here\")\n TRANSLATION_SPEC = {\n \"enc_X\": {\"input_chs\": C_X, \"filter_spec\": [50, 50, C_CODE]},\n \"enc_Y\": {\"input_chs\": C_Y, \"filter_spec\": [50, 50, C_CODE]},\n \"dec_X\": {\"input_chs\": C_CODE, \"filter_spec\": [50, 50, C_X]},\n \"dec_Y\": {\"input_chs\": C_CODE, \"filter_spec\": [50, 50, C_Y]},\n }\n else:\n print(\"why here?\")\n C_CODE = 1\n TRANSLATION_SPEC = {\n \"enc_X\": {\"input_chs\": C_X, \"filter_spec\": [C_CODE]},\n \"enc_Y\": {\"input_chs\": C_Y, \"filter_spec\": [C_CODE]},\n \"dec_X\": {\"input_chs\": C_CODE, \"filter_spec\": [C_X]},\n \"dec_Y\": {\"input_chs\": C_CODE, \"filter_spec\": [C_Y]},\n }\n print(\"Change Detector Init\")\n cd = Kern_AceNet(TRANSLATION_SPEC, **CONFIG)\n print(\"Training\")\n training_time = 0\n cross_loss_weight = tf.expand_dims(tf.zeros(x_im.shape[:-1], dtype=tf.float32), -1)\n for epochs in CONFIG[\"list_epochs\"]:\n CONFIG.update(epochs=epochs)\n tr_gen, dtypes, shapes = datasets._training_data_generator(\n x_im[0], y_im[0], cross_loss_weight[0], CONFIG[\"patch_size\"]\n )\n TRAIN = tf.data.Dataset.from_generator(tr_gen, dtypes, shapes)\n TRAIN = TRAIN.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\n tr_time, _ = cd.train(TRAIN, evaluation_dataset=EVALUATE, **CONFIG)\n for x, y, _ in EVALUATE.batch(1):\n alpha = cd([x, y])\n cross_loss_weight = 1.0 - alpha\n training_time += tr_time\n\n cd.load_all_weights(cd.log_path)\n cd.final_evaluate(EVALUATE, **CONFIG)\n metrics = {}\n for key in list(cd.difference_img_metrics.keys()) + list(\n cd.change_map_metrics.keys()\n ):\n metrics[key] = cd.metrics_history[key][-1]\n metrics[\"F1\"] = metrics[\"TP\"] / (\n metrics[\"TP\"] + 0.5 * (metrics[\"FP\"] + metrics[\"FN\"])\n )\n timestamp = cd.timestamp\n epoch = cd.epoch.numpy()\n speed = (epoch, training_time, timestamp)\n del cd\n gc.collect()\n return metrics, speed", "def enableDestruction(self):\n self.destructable = True", "def set_dft(self, value):\n self.dft = value", "def SPIjedec(self):\n data=[0x9f, 0, 0, 0];\n data=self.SPItrans(data);\n jedec=0;\n self.JEDECmanufacturer=ord(data[1]);\n if self.JEDECmanufacturer==0xFF:\n self.JEDECtype=0x20;\n self.JEDECcapacity=0x14;\n jedec=0x202014;\n else:\n self.JEDECtype=ord(data[2]);\n self.JEDECcapacity=ord(data[3]);\n jedec=(ord(data[1])<<16)+(ord(data[2])<<8)+ord(data[3]);\n self.JEDECsize=self.JEDECsizes.get(self.JEDECcapacity);\n if self.JEDECsize==None:\n self.JEDECsize=0;\n \n if jedec==0x1F4501:\n self.JEDECsize=1024**2;\n self.JEDECdevice=jedec;\n return data;", "def setup_UT_te(self):\n self.setup_O()\n self.setup_T()\n # diagonalizing T\n ET, LT, RT = eig(self.T, b=self.O, left=True, right=True)\n LT = LT.transpose().conjugate()\n exp_T = np.exp(-1j*ET / self.hbar)\n # order according to absolute value:\n i_sort = np.argsort(-abs(exp_T))\n exp_T = exp_T[i_sort]\n RT = RT[:,i_sort]\n LT = LT[i_sort,:]\n # normalize RL to O and test the decomposition\n RT, LT = self.normalize_RL_to_O(RT, LT)\n # test the quality of the decomposition -------------------------\n # we exclude directions of evals below 10**(-15) by hand\n max_mode = len(np.where(abs(exp_T)>10**(-15))[0])\n ET_red = ET[:max_mode]\n RT_red = RT[:,:max_mode]\n LT_red = LT[:max_mode,:]\n # 1) test of orthogonality on the reduced space\n unity = np.dot(LT_red, np.dot(self.O, RT_red))\n ortho_error = abs(unity - np.diag(np.ones(max_mode))).max()\n print(\"Orthogonality errors\", ortho_error)\n 
# 1) test difference between the full and the reduced te-operator\n UT_red = np.dot(RT_red, np.dot(np.diag(exp_T[:max_mode]),\n np.dot(LT_red, self.O)))\n UT = np.dot(RT, np.dot(np.diag(exp_T), np.dot(LT, self.O)))\n print(\"Propagator error\", abs(UT_red - UT).max())\n self.UT = UT", "def testRead(self):\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_TSK, location='/passwords.txt',\n inode=self._IDENTIFIER_PASSWORDS_TXT, parent=self._bde_path_spec)\n file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)\n\n self._TestRead(file_object)", "def testRead(self):\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_TSK, location='/passwords.txt',\n inode=self._IDENTIFIER_PASSWORDS_TXT, parent=self._bde_path_spec)\n file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)\n\n self._TestRead(file_object)", "def _phi_ode(self, t, z):\n # prep states and phi_matrix\n state = z[0:len(self.istate)]\n ad_state = make_ad(state)\n phi_flat = clear_ad(z[len(self.istate):])\n phi = np.reshape(phi_flat, (len(self.istate),\n len(self.istate)))\n\n # find the accelerations and jacobian\n state_deriv = self.force_model.ode(0, ad_state)\n a_matrix = jacobian(self.force_model.ode(0, ad_state),\n ad_state)\n\n # compute the derivative of the STM and repackage\n phid = np.matmul(a_matrix, phi)\n phid_flat = phid.flatten()\n z_out = np.concatenate((state_deriv, phid_flat))\n\n return z_out", "def test_tte2(self):\n filename = str(self.temp_j2k_filename)\n xtx2_setup(filename)\n self.assertTrue(True)", "def test_encrypt_decrypt(self):\n with open(self.file_path, \"rt\") as file:\n start_file = file.read()\n nonce1 = globals.generate_random_nonce()\n nonce2 = globals.generate_random_nonce()\n encrypted_file_path, additional_data = self.file_crypt.encrypt_file(\n self.file_path,\n nonce1,\n nonce2)\n file_decrypted = self.file_crypt.decrypt_file(\n file_path=encrypted_file_path,\n additional_data=additional_data)\n with open(file_decrypted, \"rt\") as file:\n end_file = file.read()\n self.assertEqual(start_file, end_file, \"Files differ!\")", "def create_tvel_file(\n depth: np.array,\n vp: np.array,\n vs: np.array,\n dens: np.array,\n save_folder: str,\n name: str = \"Test\",\n):\n\n assert (\n len(depth) == len(vp) and len(depth) == len(vs) and len(depth) == len(dens)\n ), \"All arrays (depth, vp, vs and dens) should be of same length\"\n\n \"\"\" combining all the data vector \"\"\"\n data = np.vstack((np.vstack((np.vstack((depth, vp)), vs)), dens)).T\n\n with open(join(save_folder, f\"{name}.tvel\"), \"w\") as f:\n f.write(\"# Input file for TauP\\n\")\n f.write(\"NAME TAYAK_BKE\\n\")\n for line in data:\n f.write(f\"{line[0]:8.2f}{line[1]:8.3f}{line[2]:8.3f}{line[3]:8.3f}\\n\")\n f.write(\n \"\"\" 1596.98 4.986 0.000 5.855\n 1853.05 5.150 0.000 6.025\n 2109.13 5.284 0.000 6.166\n 2365.20 5.393 0.000 6.280\n 2621.27 5.475 0.000 6.368\n 2877.35 5.534 0.000 6.430\n 3133.42 5.569 0.000 6.467\n 3389.50 5.569 0.000 6.467\"\"\"\n )\n f.close()", "def __output_encrypted(self, data, key_len, filename, iv):\n with open(filename, \"w\") as f:\n f.write(START_HEADER + \"\\n\")\n\n key = \"Description\"\n val = \"Crypted file\"\n f.write(self.gen_key_val(key, val))\n\n key = \"Method\"\n val = \"AES\"\n f.write(self.gen_key_val(key, val))\n\n key = \"File name\"\n val = filename\n f.write(self.gen_key_val(key, val))\n\n key = \"IV\"\n val = binascii.hexlify(iv)\n f.write(self.gen_key_val(key, val))\n\n key = \"Data\"\n val = 
base64.b64encode(data)\n # val = data\n f.write(self.gen_key_val(key, val))\n\n f.write(END_HEADER + \"\\n\")", "def test_create_tpm(self):\n command_line = self._MENU + [self._POOLNAME] + self._DEVICES + [\"--clevis=tpm2\"]\n TEST_RUNNER(command_line)", "def _decrypt(self):\n self._outfile = os.path.join(self.dest, self.plain_file)\n self._infile = self.encrypted_file\n self._log.info(\"Decrypting file '%s' to '%s'\", self.encrypted_file, self._outfile)\n with open(self.encrypted_file, \"rb\") as enc_file:\n openssl(\n \"enc\",\n \"-aes-256-cbc\",\n \"-d\",\n \"-pass\",\n \"file:{secret}\".format(secret=self.secret.keyfile),\n _in=enc_file,\n _out=self._outfile,\n )\n self._log.info(\"File '%s' decrypted to '%s'\", self.encrypted_file, self._outfile)\n return True", "def t_xtide(*varargin):\n nargin = len(varargin)\n if nargin > 0:\n varargin = varargin[0]\n if not os.path.exists('t_xtide.mat'):\n # Read the harmonics file and make a mat file\n filnam = '/usr/share/xtide/harmonics.txt'\n fprintf(\"\\\\n********Can't find mat-file t_xtide.mat ********\\\\n\\\\n\")\n fprintf('Attempting to generate one from an xtide harmonics file....\\\\n\\\\n')\n fprintf('Latest version available from http://bel-marduk.unh.edu/xtide/files.html\\\\n\\\\n')\n # Input name\n fid = - 1\n while fid == - 1:\n\n rep = filnam\n while (lower(rep[0]) != 'y'):\n\n filnam = rep\n rep = 'n'\n rep = input_('Harmonics filename: ' + filnam + '? (y/Y/new file name):', 's')\n if (0 in rep.shape):\n rep = 'y'\n\n fid = open(filnam)\n if fid == - 1:\n fprintf(\"\\\\n****** Can't open filename ->\" + filnam + '<-\\\\n\\\\n')\n\n fprintf('Reading harmonics file (this will take a while)\\\\n')\n xtide, xharm = read_xtidefile(fid) # nargout=2\n fprintf('Saving harmonic information to t_xtide.mat\\\\n')\n savemat('t_xtide', 'xtide', 'xharm')\n else:\n loadmat('t_xtide',matlab_compatible=True)\n if nargin > 0:\n if isstr(varargin[0]):\n # Station name given\n # Identify station - look for exact match first\n ista = strmatch(lower(varargin[0]), lower(xharm.station), 'exact')\n # otherwise go for partial matches\n if (0 in ista.shape):\n # First check to see if a number was selected:\n inum = - 10\n while inum < - 1:\n\n inum = inum + 1\n ll = findstr(lower(varargin[0]), sprintf('(\\n %d)', - inum))\n if not (0 in ll.shape):\n inum = abs(inum)\n varargin[0] = deblank(varargin[0](range(1, (ll - 1 +1))))\n\n ista = strmatch(lower(varargin[0]), lower(xharm.station))\n if max(ista.shape) > 1:\n if inum > 0 & inum <= max(ista.shape):\n ista = ista[(inum -1)]\n else:\n fprintf('Ambiguous Station Choice - Taking first of:\\\\n')\n for kk in range(1, (max(ista.shape) +1)):\n fprintf('\\n %5d: \\n %s\\\\n', ista[(kk -1)], deblank(xharm.station(ista[(kk -1)], :)))\n fprintf(' Long: \\n %.4f Lat: \\n %.4f \\\\n', xharm.longitude(ista[(kk -1)]), xharm.latitude(ista[(kk -1)]))\n fprintf('\\\\n')\n ista = ista[0]\n else:\n if max(ista.shape) == 1 & inum > 1:\n fprintf(\"***Can't find variant (\\n %d) of station - Taking only choice\\\\n\", inum)\n else:\n if max(ista.shape) == 0:\n error('Could not match station')\n varargin[0] = np.array([])\n else:\n # Lat/long?\n dist, hdg = t_gcdist(xharm.latitude, xharm.longitude, varargin[1], varargin[0]) # nargout=2\n mind, ista = np.min(dist) # nargout=2\n if max(ista.shape) > 1:\n fprintf('Ambiguous Station Choice - Taking first of:\\\\n')\n for kk in range(1, (max(ista.shape) +1)):\n fprintf('\\n %5d: \\n %s\\\\n', ista[(kk -1)], deblank(xharm.station(ista[(kk -1)], :)))\n fprintf(' Long: \\n %.4f Lat: 
\\n %.4f \\\\n', xharm.longitude(ista[(kk -1)]), xharm.latitude(ista[(kk -1)]))\n fprintf('\\\\n')\n ista = ista[0]\n else:\n fprintf('\\n %5d: \\n %s\\\\n', ista, deblank(xharm.station(ista, :)))\n fprintf(' Long: \\n %.4f Lat: \\n %.4f \\\\n', xharm.longitude(ista), xharm.latitude(ista))\n varargin[0:2] = np.array([])\n # Time vector (if available) otherwise take current time.\n if max(varargin.shape) > 0 & not isstr(varargin[0]):\n tim = varargin[0]\n tim = tim[:].T\n varargin[0] = np.array([])\n if max(tim.shape) == 1:\n if tim < 1000:\n dat = clock\n tim = datenum(dat[0], dat[1], dat[2]) + np.array([range(0, (tim +1), 1 / 48)]).reshape(1, -1)\n else:\n tim = tim + np.array([range(0, 3, 1 / 48)]).reshape(1, -1)\n # 2 days worth.\n else:\n dat = clock\n tim = datenum(dat[0], dat[1], dat[2]) + np.array([range(0, 49, 0.25)]).reshape(1, -1) / 24\n # Parse properties\n format_ = 'raw'\n unt = 'original'\n k = 1\n while max(varargin.shape) > 0:\n\n if 'for' == lower(varargin[-1](range(1, 4))):\n format_ = lower(varargin[1])\n else:\n if 'uni' == lower(varargin[-1](range(1, 4))):\n unt = lower(varargin[1])\n else:\n error(\"Can't understand property:\" + varargin[0])\n varargin[(np.array([1, 2]).reshape(1, -1) -1)] = np.array([])\n\n # if we want a time series\n pred = np.array([])\n # Convert units if requested.\n units, convf = convert_units(unt, xharm.units(ista, :)) # nargout=2\n if format_[0:2] == 'ra' | format_[0:2] == 'fu' | format_[0:2] == 'ti':\n # Data every minute for hi/lo forecasting.\n if format_[0:2] == 'ti':\n tim = range(tim[0], (tim[-1] +1), (1 / 1440))\n # Convert into time since the beginning of year\n mid = datevec(mean(tim))\n iyr = mid[0] - xtide.startyear + 1\n lt = max(tim.shape)\n xtim = np.dot((tim - datenum(mid[0], 1, 1)), 24)\n # Hours since beginning of year\n #-----------------------------------------------------\n # Sum up everything for the prediction!\n pred = xharm.datum(ista) + np.sum(repmat(xtide.nodefactor(:, iyr) * xharm.A(ista, :).T, 1, lt) * cos(np.dot((np.dot(xtide.speed, xtim) + repmat(xtide.equilibarg(:, iyr) - xharm.kappa(ista, :).T, 1, lt)), (pi / 180))), 1)\n #-----------------------------------------------------\n pred = np.dot(pred, convf)\n # Compute times of hi/lo from every-minute data\n if format_[0:2] == 'ti':\n # Check if this is a current station\n if not (0 in findstr('Current', xharm.station(ista, :)).shape):\n currents = 1\n else:\n currents = 0\n dpred = diff(pred)\n ddpred = diff(dpred > 0)\n flat = np.flatnonzero(ddpred != 0) + 1\n slk = np.flatnonzero(sign(pred[0:pred.shape[0] - 1]) != sign(pred[1:pred.shape[0]]))\n hi.mtime = tim[(flat -1)]\n hi.value = pred[(flat -1)]\n hi.type = np.zeros(shape=(flat.shape, flat.shape), dtype='float64')\n hi.type(np.flatnonzero(ddpred[(flat - 1 -1)] < 0)) = 1\n # 0=lo, 1=hi\n hi.units = deblank(units)\n pred = hi\n # Create information structure\n if format_[0:2] == 'in' | format_[0:2] == 'fu':\n if not (0 in pred.shape):\n pred.yout = pred\n pred.mtime = tim\n else:\n kk = np.flatnonzero(xharm.A(ista, :) != 0)\n pred.freq = xtide.name(kk, :)\n pred.A = np.dot(full(xharm.A(ista, kk).T), convf)\n pred.kappa = full(xharm.kappa(ista, kk).T)\n pred.station = deblank(xharm.station(ista, :))\n pred.longitude = xharm.longitude(ista)\n pred.latitude = xharm.latitude(ista)\n pred.timezone = xharm.timezone(ista)\n pred.units = deblank(units)\n pred.datum = np.dot(xharm.datum(ista), convf)\n # If no output parameters then we plot or display things\n if nargout == 0:\n if 'ti' == format_[(((0:2 -1) -1) 
-1)]:\n fprintf('High/Low Predictions for \\n %s\\\\n', xharm.station(ista, :))\n fprintf('Time offset \\n %.1f from UTC\\\\n\\\\n', xharm.timezone(ista))\n outstr = repmat(' ', max(flat.shape), 41)\n outstr[:, 0:20] = datestr(hi.mtime)\n outstr[:, 21:27] = reshape(sprintf('\\n %6.2f', hi.value), 6, max(flat.shape)).T\n if currents:\n ll = hi.type == 1\n outstr[(ll -1), 30:41] = repmat(' Flood Tide', np.sum(ll), 1)\n ll = hi.type == 0\n outstr[(ll -1), 30:41] = repmat(' Ebb Tide ', np.sum(ll), 1)\n else:\n ll = hi.type == 1\n outstr[(ll -1), 30:41] = repmat(' High Tide ', np.sum(ll), 1)\n ll = hi.type == 0\n outstr[(ll -1), 30:41] = repmat(' Low Tide ', np.sum(ll), 1)\n disp(outstr)\n else:\n if 'ra' == format_[(((0:2 -1) -1) -1)]:\n plot(tim, pred)\n datetick\n title('Tidal prediction for ' + deblank(xharm.station(ista, :)) + ' beginning ' + datestr(tim[0]))\n ylabel(deblank(xharm.units(ista, :)))\n else:\n if 'fu' == format_[(((0:2 -1) -1) -1)]:\n plot(tim, pred.yout)\n datetick\n title('Tidal prediction for ' + deblank(xharm.station(ista, :)) + ' beginning ' + datestr(tim[0]))\n ylabel(deblank(xharm.units(ista, :)))\n else:\n if 'in' == format_[(((0:2 -1) -1) -1)]:\n fprintf('Station: \\n %s\\\\n', pred.station)\n if pred.longitude < 0:\n lon = 'W'\n else:\n lon = 'E'\n if pred.latitude < 0:\n lat = 'S'\n else:\n lat = 'N'\n fprintf(\"Location: \\n %d \\n %.1f' \\n %c, \\n %d \\n %.1f' \\n %c\\\\n\", fix(abs(pred.latitude)), np.dot(rem(abs(pred.latitude), 1), 60), lat, fix(abs(pred.longitude)), np.dot(rem(abs(pred.longitude), 1), 60), lon)\n fprintf('Time offset \\n %.1f from UTC\\\\n\\\\n', pred.timezone)\n clear('pred')\n #\n return pred", "def decryptor(infile: str, outfile: str, password: str, mode: str) -> int:\n\n dec = Decrypt(infile)\n\n if mode.upper() == 'AES':\n decrypted_data = dec.AES(password)\n elif mode.upper() == 'DES':\n decrypted_data = dec.DES(password)\n elif mode.upper() == 'SALSA20':\n decrypted_data = dec.Salsa20(password)\n else:\n return 2\n\n if not decrypted_data:\n cleanup(outfile)\n return 3\n\n if not outfile.endswith(dec.extension):\n outfile += dec.extension\n write_data(decrypted_data, outfile)\n return 0", "def decryptor(file_name, key):\n\twith open(file_name, 'rb') as dfile:\n\t\tciphertext = dfile.read()\n\t\tdec = decrypt(key, ciphertext)\n\t\tdfile.close()\n\t\tdtext = \"The encrypted file was opened by macupdate.py by the user: \"\n\t\tcreateLog(dtext, 'logs/macupdate.log')\n\t\treturn dec", "def _dK_ode_dtheta(self, target):\r\n t_ode = self._t[self._index>0]\r\n dL_dK_ode = self._dL_dK[self._index>0, :]\r\n index_ode = self._index[self._index>0]-1\r\n if self._t2 is None:\r\n if t_ode.size==0:\r\n return \r\n t2_ode = t_ode\r\n dL_dK_ode = dL_dK_ode[:, self._index>0]\r\n index2_ode = index_ode\r\n else:\r\n t2_ode = self._t2[self._index2>0]\r\n dL_dK_ode = dL_dK_ode[:, self._index2>0]\r\n if t_ode.size==0 or t2_ode.size==0:\r\n return\r\n index2_ode = self._index2[self._index2>0]-1\r\n\r\n h1 = self._compute_H(t_ode, index_ode, t2_ode, index2_ode, stationary=self.is_stationary, update_derivatives=True)\r\n #self._dK_ddelay = self._dh_ddelay\r\n self._dK_dsigma = self._dh_dsigma\r\n\r\n if self._t2 is None:\r\n h2 = h1\r\n else:\r\n h2 = self._compute_H(t2_ode, index2_ode, t_ode, index_ode, stationary=self.is_stationary, update_derivatives=True)\r\n\r\n #self._dK_ddelay += self._dh_ddelay.T\r\n self._dK_dsigma += self._dh_dsigma.T\r\n # C1 = self.sensitivity\r\n # C2 = self.sensitivity\r\n\r\n # K = 0.5 * (h1 + h2.T)\r\n # var2 = C1*C2\r\n # if 
self.is_normalized:\r\n # dk_dD1 = (sum(sum(dL_dK.*dh1_dD1)) + sum(sum(dL_dK.*dh2_dD1.T)))*0.5*var2\r\n # dk_dD2 = (sum(sum(dL_dK.*dh1_dD2)) + sum(sum(dL_dK.*dh2_dD2.T)))*0.5*var2\r\n # dk_dsigma = 0.5 * var2 * sum(sum(dL_dK.*dK_dsigma))\r\n # dk_dC1 = C2 * sum(sum(dL_dK.*K))\r\n # dk_dC2 = C1 * sum(sum(dL_dK.*K))\r\n # else:\r\n # K = np.sqrt(np.pi) * K\r\n # dk_dD1 = (sum(sum(dL_dK.*dh1_dD1)) + * sum(sum(dL_dK.*K))\r\n # dk_dC2 = self.sigma * C1 * sum(sum(dL_dK.*K))\r\n\r\n\r\n # dk_dSim1Variance = dk_dC1\r\n # Last element is the length scale.\r\n (dL_dK_ode[:, :, None]*self._dh_ddelay[:, None, :]).sum(2)\r\n\r\n target[-1] += (dL_dK_ode*self._dK_dsigma/np.sqrt(2)).sum()\r\n\r\n\r\n # # only pass the gradient with respect to the inverse width to one\r\n # # of the gradient vectors ... otherwise it is counted twice.\r\n # g1 = real([dk_dD1 dk_dinvWidth dk_dSim1Variance])\r\n # g2 = real([dk_dD2 0 dk_dSim2Variance])\r\n # return g1, g2\"\"\"\r", "def digital_temp_data(self): # This function will give the initial digital format for temperature data \n self._bus.write_byte(self._addr, 0x58) \n time.sleep(0.05) \n tempadcbytes = self._bus.read_i2c_block_data(self._addr, 0x00) \n time.sleep(0.05) \n self.tempadc=tempadcbytes[0]*65536.0+tempadcbytes[1]*256.0+tempadcbytes[2]", "def example():\r\n path = os.path.abspath(os.path.dirname(__name__))\r\n module = CryptoModule()\r\n # create_name this is open source py module with confidential information\r\n opened_path = os.path.join(path, 'secret.py')\r\n # read_name this is open encrypted py module with confidential information\r\n secured_path = os.path.join(path, 'secured.py')\r\n # encrypt, read secret.py and create secured.py\r\n module.create_secured_module(path_to_opened_module=opened_path, path_to_secured_module=secured_path,\r\n create_key=True, delete_source_opened_module=False)\r\n # decrypt, read secured.py and create opened.py\r\n module.create_opened_module(path_to_secured_module=secured_path, path_to_opened_module=opened_path)\r\n print('ok')", "def test_tte1(self):\n filename = str(self.temp_j2k_filename)\n self.xtx1_setup(filename)", "def Elevate(self):\n self.Send(self.EncryptString('elevate\\n'))\n print self.DecryptString(self.Recv(4096))\n\n self.Send(self.EncryptString(self.flag_2))\n print self.DecryptString(self.Recv(4096))\n\n self.Send(self.EncryptString('RocketDonkey\\n'))\n print self.DecryptString(self.Recv(4096))", "def detx(self, det_id, t0set=None, calibration=None):\n url = 'detx/{0}?'.format(det_id) # '?' 
since it's ignored if no args\n if t0set is not None:\n url += '&t0set=' + t0set\n if calibration is not None:\n url += '&calibrid=' + calibration\n\n detx = self._get_content(url)\n return detx", "def unfreeze_rotation(self):\n self.android_device_driver.adb.exec_adb_cmd(\n \"shell content insert --uri content://settings/system --bind name:s:accelerometer_rotation --bind value:i:1\"\n ).wait()", "def main():\n \n # Ask for their option.\n \n inputFile = \"\"\n outputFile = \"\"\n \n choice = askOption()\n key = askForKey()\n \n inputFile = askInputFile()\n inputText = readText(inputFile)\n \n outputFile = askOutputFile()\n \n #Start the timer here.\n startTimer = time.time()\n \n # Depending on their choice, encode or decode.\n if choice == 'e':\n encryptedText = RouteCipher.encrypt(inputText, key)\n writeText(encryptedText, outputFile)\n elif choice == 'd':\n decryptedText = RouteCipher.decrypt(inputText, key)\n writeText(decryptedText, outputFile)\n \n finishTimer = time.time()\n totalTime = round(finishTimer - startTimer, 2)\n \n print(\"The operation was succesful\")\n print(f\"Total time needed: {totalTime}\")", "def StoreAntirollback(now, ar_filename, kern_f):\n print 'antirollback time now ' + str(now)\n sys.stdout.flush()\n kern_f.write(str(now))\n kern_f.flush()\n tmpdir = os.path.dirname(ar_filename)\n with tempfile.NamedTemporaryFile(mode='w', dir=tmpdir, delete=False) as f:\n f.write(str(now) + '\\n')\n f.flush()\n os.fsync(f.fileno())\n os.rename(f.name, ar_filename)", "def test_dfunction_saveable(self):\n \n wa = FrequencyAxis(0.0, 1000, 0.1)\n \n fw = numpy.exp(-wa.data)\n \n fce = DFunction(wa,fw)\n \n #fce.plot()\n\n #with h5py.File(\"test_file_1\",driver=\"core\", \n # backing_store=False) as f:\n with tempfile.TemporaryFile() as f:\n \n fce.save(f, test=True)\n \n fce2 = DFunction()\n fce2 = fce2.load(f, test=True)\n \n #fce2.plot()\n \n numpy.testing.assert_array_equal(fce.data, fce2.data)", "def vmdexec(cmds):\n handle,filename=mkstemp(dir='/tmp')\n open(filename,'w').write(cmds)\n os.system('vmd -dispdev text -e %s'%filename) # run vmd in the terminal\n os.system('/bin/rm %s'%filename) # clean-up", "def teleopInit(self):\n # self.drive.setSafetyEnabled(True)\n self.compressor.start()\n pass", "def construct_TDI(self, t, Orbit):\n\t\n\tself.make_padded_delta_l(t)\n\n\tp12 = td.Phase(1,2, t, self.delta_l_padded[0,1,:])\n\tp21 = td.Phase(2,1, t, self.delta_l_padded[1,0,:])\n\n\tp13 = td.Phase(1,3, t, self.delta_l_padded[0,2,:])\n\tp31 = td.Phase(3,1, t, self.delta_l_padded[2,0,:])\n\n\tp23 = td.Phase(2,3, t, self.delta_l_padded[1,2,:])\n\tp32 = td.Phase(3,2, t, self.delta_l_padded[2,1,:])\n \n\tp12.FT_phase(Orbit)\n\tp21.FT_phase(Orbit)\n\tp13.FT_phase(Orbit)\n\tp31.FT_phase(Orbit)\n\tp23.FT_phase(Orbit)\n\tp32.FT_phase(Orbit)\n\n\ttdi_GW = td.TDI(p12, p21, p13, p31, p23, p32, Orbit)\n\t\n\treturn tdi_GW", "async def describe_dbinstance_tdeinfo_with_options_async(\n self,\n request: dds_20151201_models.DescribeDBInstanceTDEInfoRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceTDEInfoResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = 
request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceTDEInfo',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstanceTDEInfoResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def dftb_dftd3(third_ord, damp_flag, damp_exp):\n dftb_dftd3=\"\"\"\n ThirdOrderFull = {{ third_ord }}\n DampXH = {{ damp_flag }}\n DampXHExponent = {{ damp_exp }}\n Dispersion = DftD3{}\n}\n \"\"\"\n return Environment().from_string(dftb_dftd3).render(third_ord=third_ord, damp_flag=damp_flag, damp_exp=damp_exp)", "def test_encrypt_decrypt(self):\n reference = get_random_test_tensor()\n encrypted_tensor = SharedTensor(reference)\n self._check(encrypted_tensor, reference, 'en/decryption failed')", "def test_default_run_ubuntu_keep_vdmx():\n test_dir = os.path.join(\"tests\", \"test_files\", \"fonts\", \"temp\")\n notouch_inpath = os.path.join(\"tests\", \"test_files\", \"fonts\", \"Ubuntu-Regular.ttf\")\n test_inpath = os.path.join(\n \"tests\", \"test_files\", \"fonts\", \"temp\", \"Ubuntu-Regular.ttf\"\n )\n test_outpath = os.path.join(\n \"tests\", \"test_files\", \"fonts\", \"temp\", \"Ubuntu-Regular-dehinted.ttf\"\n )\n test_args = [test_inpath, \"--keep-vdmx\"]\n\n # setup\n if os.path.isdir(test_dir):\n shutil.rmtree(test_dir)\n os.mkdir(test_dir)\n shutil.copyfile(notouch_inpath, test_inpath)\n\n # execute\n run(test_args)\n\n # test\n tt = TTFont(test_outpath)\n assert \"VDMX\" in tt\n\n # tear down\n shutil.rmtree(test_dir)", "def direct_mode_test(self,earfcn,bwMhz,powerdBm,ud_config,sf_sweep=False,with_rx=False):\r\r\n\r\r\n self.meas_list = ['FREQ_ERR','IQ_OFFSET', 'EVM']\r\r\n tol_dB = 1\r\r\n\r\r\n bursted = self.setup_tdd(earfcn,bwMhz,powerdBm,ud_config,with_rx=with_rx)\r\r\n\r\r\n self.GetTesterMeasurments(exp_powerdBm=powerdBm,\r\r\n tol_dB=tol_dB)\r\r\n\r\r\n # Note - Direct AGC value leads to different powers on different platforms\r\r\n # -- use driver mode and read back AGC value to get baseline,\r\r\n # then try that value in direct mode.\r\r\n dac_value = self.modemObj.query_txagc()\r\r\n\r\r\n # Set minimum power\r\r\n self.modemObj.set_txagc_dbm(value=-70)\r\r\n self.GetTesterMeasurments(exp_powerdBm=-9999,\r\r\n tol_dB=0,\r\r\n exp_no_signal=True)\r\r\n\r\r\n # Set the original power, but as a direct gain DAC word this time.\r\r\n self.modemObj.set_txagc_direct(value=dac_value)\r\r\n\r\r\n sf_sweep = bursted and sf_sweep\r\r\n meas_sf_list = range(10) if sf_sweep else [2] # 2 is always UL\r\r\n for meas_sf in meas_sf_list:\r\r\n self.set_meas_sf(meas_sf)\r\r\n self.instr.lte_tx.conf_measurement_subframe(measSubframe=meas_sf)\r\r\n\r\r\n if sf_is_uplink(ud_config, meas_sf):\r\r\n self.GetTesterMeasurments(exp_powerdBm=powerdBm,\r\r\n tol_dB=tol_dB)\r\r\n else:\r\r\n # Non-UL subframe, do not expect signal\r\r\n self.GetTesterMeasurments(exp_powerdBm=-9999,\r\r\n tol_dB=0,\r\r\n exp_no_signal=True)\r\r\n\r\r\n meas_sf = 2\r\r\n self.set_meas_sf(meas_sf)\r\r\n self.instr.lte_tx.conf_measurement_subframe(measSubframe=meas_sf)\r\r\n\r\r\n # Check going back to driver 
mode\r\r\n self.modemObj.set_txagc_dbm(value=-70)\r\r\n self.GetTesterMeasurments(exp_powerdBm=-9999,\r\r\n tol_dB=0,\r\r\n exp_no_signal=True)\r\r\n self.modemObj.set_txagc_dbm(value=powerdBm)\r\r\n self.GetTesterMeasurments(exp_powerdBm=powerdBm,\r\r\n tol_dB=tol_dB)", "def test_tdwr():\n f = Level3File(get_test_data('nids/Level3_SLC_TV0_20160516_2359.nids'))\n assert f.prod_desc.prod_code == 182", "def main():\n print(\"Reading from config.json\")\n download_decrypt_store = DownloadDecryptStore()\n print(\"Downloading key from storage-bucket\")\n file_path = download_decrypt_store.download_key_from_blob()\n print(\"Decrypting downloaded file\")\n download_decrypt_store.decrypt_from_file(file_path)\n print(\"Completed\")", "def de_cryptate(self):\r\n\r\n intab1 = \"abcdefghijklomnopqrstuvwxyz !,.\"\r\n outtab1 = \"?2p=o)7i(u9/y&t3%r¤5e#w1q!>*'^;)\"\r\n# Fetching from written in textbox\r\n s = self.textbox.toPlainText()\r\n a = s.lower()\r\n# Changing out the letters/numbers/etc\r\n crypted = (a.translate({ord(x): y for (y, x) in zip(intab1, outtab1)}))\r\n# Clear the textbox\r\n self.textbox.clear()\r\n# Write the Decrypted text\r\n self.textbox.setPlainText(crypted)", "def describe_dbinstance_tdeinfo_with_options(\n self,\n request: dds_20151201_models.DescribeDBInstanceTDEInfoRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceTDEInfoResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceTDEInfo',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstanceTDEInfoResponse(),\n self.call_api(params, req, runtime)\n )", "def test_tte5(self):\n filename = str(self.temp_j2k_filename)\n xtx5_setup(filename)\n self.assertTrue(True)", "def configure_disable_aes_encryption(device):\n dialog = Dialog(\n [\n Statement(\n pattern=r\".*Continue\\s*with\\s*master\\s*key\\s*deletion.*\",\n action=\"sendline(yes)\",\n loop_continue=True,\n continue_timer=False,\n )\n ]\n )\n try:\n device.configure(\"no key config-key password-encrypt\", reply=dialog)\n device.configure(\"no password encryption aes\")\n except SubCommandFailure as e:\n raise SubCommandFailure(\n \"Could not remove aes password encryption on {device}.\\nError: {e}\".format(\n device=device.name, e=str(e))\n )", "def do_android_decryption(self):\r\n self.aes_decryption_key = self.extract_aes_key()\r\n self.decrypt_device_file()\r\n # join is optimized and does not cause O(n^2) total memory copies.\r\n self.decrypted_file = b\"\\n\".join(self.good_lines)", "def encrypt(self,timerPrinting=False):\n\n\t\tt = time.time() \n\t\tif self.extension not in 
self.path:\n\t\t\twith open(self.path,'rb') as infile:\n\t\t\t\tfile_data = infile.read()\n\t\t\t#Start To CHecking The PlatForm\n\t\t\t# if platform.system() == \"Windows\":\n\t\t\t# \tself.path_dir = self.path.split(\"\\\\\")[-1]\n\t\t\t# elif platform.system() == \"Linux\":\n\t\t\t# \tself.path_dir = self.path.split('/')[-1]\n\t\t\t# #End Checking Wich Platform\n\t\t\t# print('Encryption of '+self.path_dir+'...')\n\t\t\t# print('It\\'s may take a will')\n\t\t\t################################### Blowfish Algorithm ##############################\n\t\t\tbs = Blowfish.block_size\n\t\t\tiv = Random.new().read(bs)\n\t\t\tpadding = b\"}\"\n\t\t\tp = lambda s: s+(bs - len(s) % bs )*padding\n\t\t\tc= Blowfish.new(self.key, Blowfish.MODE_CBC, iv)\n\t\t\tencrypt = iv + c.encrypt(p(file_data))\n\t\t\tself.encrypt = base64.b64encode(encrypt) \n\t\t\t################################################################\n\t\t\t#print(\"writing in your file ...\")\n\t\t\tos.remove(self.path)\n\t\t\twith open(self.path + self.extension,\"wb\") as newfile:\n\t\t\t\tnewfile.write(self.encrypt)\n\t\t\tif timerPrinting:\n\t\t\t\tprint('Done In '+ time.time() -t)\n\t\telse:\n\t\t\tprint('The File is already encrypt.')", "def test_encryption_cycle_default_algorithm_non_framed_no_encryption_context(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"], key_provider=self.kms_master_key_provider, frame_length=0\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]", "def encryptor(file_name, key, plaintext):\n\twith open(file_name, 'w') as efile:\n\t\tenc = encrypt(key, plaintext)\n\t\tefile.write(enc)\n\t\tefile.close()\n\t\tetext = \"An encrypted passfile was created named key.enc for further use in this script by the user: \"\n\t\tcreateLog(etext, 'logs/macupdate.log')", "def disable_tee(self):\n self._tee = False", "def test_storage_truncation(tmp_path):\n file = tmp_path / \"test_storage_truncation.hdf5\"\n for truncate in [True, False]:\n storages = [MemoryStorage()]\n if module_available(\"h5py\"):\n storages.append(FileStorage(file))\n tracker_list = [s.tracker(interval=0.01) for s in storages]\n\n grid = UnitGrid([8, 8])\n state = ScalarField.random_uniform(grid, 0.2, 0.3)\n eq = DiffusionPDE()\n\n eq.solve(state, t_range=0.1, dt=0.001, tracker=tracker_list)\n if truncate:\n for storage in storages:\n storage.clear()\n eq.solve(state, t_range=[0.1, 0.2], dt=0.001, tracker=tracker_list)\n\n times = np.arange(0.1, 0.201, 0.01)\n if not truncate:\n times = np.r_[np.arange(0, 0.101, 0.01), times]\n for storage in storages:\n msg = f\"truncate={truncate}, storage={storage}\"\n np.testing.assert_allclose(storage.times, times, err_msg=msg)\n\n if any(platform.win32_ver()):\n for storage in storages:\n if isinstance(storage, FileStorage):\n storage.close()\n\n assert not storage.has_collection", "def change_TTS_engine(self):\n\t\t\n\t\tif self.isActiveDualTTS:\n\t\t\t#dual TTS\n\t\t\tpath_to_file = '/var/lib/snips/skills/snips_app_pilldispenser/settings/dual_TTS.sh'\n\t\t\tsubprocess.call([path_to_file])\n\t\t\tprint('Dual TTS is enabled. Using Amazon Polly TTS in case of internet connection, else use offline Picotts TTS.')\n\t\t\t\n\t\telse:\n\t\t\t#go back to single offline Picotts TTS\n\t\t\tpath_to_file = '/var/lib/snips/skills/snips_app_pilldispenser/settings/single_TTS.sh'\n\t\t\tsubprocess.call([path_to_file])\n\t\t\tprint('Dual TTS is disabled. 
Using offline Picotts TTS regardless of internect connection.')", "def test_decryption(d, c):\n\n#\td = int(raw_input(\"\\nEnter d from public key\\n\"))\n#\tc = int(raw_input(\"\\nEnter c from public key\\n\"))\n\n x = int(raw_input(\"\\nEnter number to decrypt\\n\"))\n decode(endecrypt(x, d, c))", "def test_secretbox_enc_dec(self):\n # Encrypt with sk\n encrypted_data = nacl.secretbox_encrypt(data=self.unencrypted_data, sk=self.sk)\n\n # Decrypt with sk\n decrypted_data = nacl.secretbox_decrypt(data=encrypted_data, sk=self.sk)\n\n self.assertEqual(self.unencrypted_data, decrypted_data)", "def test_ttd1(self):\n filename = str(self.temp_j2k_filename)\n\n # Produce the tte0 output file for ttd0 input.\n self.xtx1_setup(filename)\n\n kwargs = {'x0': 0,\n 'y0': 0,\n 'x1': 128,\n 'y1': 128,\n 'filename': filename,\n 'codec_format': openjp2.CODEC_J2K}\n tile_decoder(**kwargs)\n self.assertTrue(True)", "def test_ttd0(self):\n filename = str(self.temp_j2k_filename)\n ttx0_setup(filename)\n\n kwargs = {'x0': 0,\n 'y0': 0,\n 'x1': 1000,\n 'y1': 1000,\n 'filename': filename,\n 'codec_format': openjp2.CODEC_J2K}\n tile_decoder(**kwargs)\n self.assertTrue(True)", "def freeze_rotation(self):\n self.android_device_driver.adb.exec_adb_cmd(\n \"shell content insert --uri content://settings/system --bind name:s:accelerometer_rotation --bind value:i:0\"\n ).wait()", "def setupForFTK(self):\n t1 = self.getKeyword('ISS CONF T1NAME').strip()\n t2 = self.getKeyword('ISS CONF T2NAME').strip()\n #swapped = self.getKeyword('ISS PRI STS'+t1[2]+' GUIDE_MODE').strip()\n\n fsub_pos_fri = self.maxSnrInScan(fsu='FSUB', opdc='OPDC', plot=1)\n fsua_pos_fri = self.maxSnrInScan(fsu='FSUA', opdc='OPDC', plot=2)\n print '---{'+self.insmode+'}---'\n if swapped == 'NORMAL':\n print ' OPDC -> [ZPD offset] x [sign',self.DLtrack,\\\n '] =',-fsub_pos_fri\n print 'DOPDC -> [ZPD offset] x [sign',\\\n self.getKeyword('ISS DDL1 NAME').strip(),\\\n '] = ',(fsub_pos_fri-fsua_pos_fri)\n else:\n print ' OPDC -> [ZPD offset] x [sign',self.DLtrack,\\\n '] =', fsua_pos_fri\n print 'DOPDC -> [ZPD offset] x [sign',\\\n self.getKeyword('ISS DDL2 NAME').strip(),\\\n '] = ',(fsua_pos_fri-fsub_pos_fri)\n return", "def setup_tdelta(self, dir1: str, num1: int, pos1: str, dir2: str, num2: int, pos2: str) -> None:\n cmd = ':measure:define deltatime,{0},{1},{2},{3},{4},{5}'.format(dir1, num1, pos1, dir2, num2, pos2)\n self.write(cmd)", "def decrypt(self, payload):\r\n\r\n #print(b'payload: %s'%(payload))\r\n decrypt1 = aes(self.ivkey, 2, self.staticiv)\r\n iv = decrypt1.decrypt(b'%s'%(payload['eiv']))\r\n #print(b'iv : %s'%(iv))\r\n decrypt2 = aes(b'%s'%(self.datakey), 2, b'%s'%(iv))\r\n temp = decrypt2.decrypt(b'%s'%(payload['ed']))\r\n #print(b'data : %s'%(temp))\r\n x_accel = int.from_bytes(temp[:4],\"big\")\r\n y_accel = int.from_bytes(temp[4:8],\"big\")\r\n z_accel = int.from_bytes(temp[8:12],\"big\")\r\n temp = float(temp[12:])\r\n print(x_accel,y_accel,z_accel,temp)\r\n temp1 = dict()\r\n \r\n temp1[\"value1\"] = str(x_accel)\r\n temp1[\"value2\"] = str(y_accel)\r\n temp1[\"value3\"] = str(z_accel)\r\n urequests.request(\"POST\", \"http://maker.ifttt.com/trigger/Spinner2/with/key/c6aKBXblAZ9tkL3Vu9tIlr\", json=temp1, headers={\"Content-Type\": \"application/json\"})\r\n temp1[\"value1\"] = str(temp)\r\n temp1[\"value2\"] = str(self.nodeid)\r\n temp1[\"value3\"] = str(self.sessionID)\r\n urequests.request(\"POST\", \"http://maker.ifttt.com/trigger/Spinner2/with/key/c6aKBXblAZ9tkL3Vu9tIlr\", json=temp1, headers={\"Content-Type\": 
\"application/json\"})\r\n temp1[\"value1\"] = ''\r\n temp1[\"value2\"] = ''\r\n temp1[\"value3\"] = ''\r\n urequests.request(\"POST\", \"http://maker.ifttt.com/trigger/Spinner2/with/key/c6aKBXblAZ9tkL3Vu9tIlr\", json=temp1, headers={\"Content-Type\": \"application/json\"})\r\n \r\n if self.x_accel == None or self.y_accel == None or self.z_accel == None:\r\n self.x_accel = x_accel\r\n self.y_accel = y_accel\r\n self.z_accel = z_accel\r\n \r\n elif abs(self.x_accel - x_accel) > 30 or abs(self.y_accel - y_accel) > 30 or abs(self.z_accel - z_accel) > 30:\r\n self.R_LED.value(1)\r\n self.x_accel = x_accel\r\n self.y_accel = y_accel\r\n self.z_accel = z_accel\r\n \r\n else:\r\n self.R_LED.value(0)\r\n self.x_accel = x_accel\r\n self.y_accel = y_accel\r\n self.z_accel = z_accel\r\n \r\n if self.temp == None:\r\n self.temp = temp\r\n \r\n elif abs(self.temp - temp) < 1:\r\n self.G_LED.freq(10)\r\n elif abs(self.temp - temp) >= 1:\r\n if 10 + (5 * int(temp - self.temp)) < 0:\r\n self.G_LED.freq(0)\r\n elif temp - self.temp <= -1:\r\n self.G_LED.freq(10 + (5 * int(temp - self.temp)))\r\n else:\r\n self.G_LED.freq(10 + (5 * int(temp - self.temp)))\r\n \r\n return \"Successful Decryption\"", "def fetch_taiwan_ntu_dsi():\n dipy_home = pjoin(os.path.expanduser('~'), '.dipy')\n uraw = 'http://dl.dropbox.com/u/2481924/taiwan_ntu_dsi.nii.gz'\n ubval = 'http://dl.dropbox.com/u/2481924/tawian_ntu_dsi.bval'\n ubvec = 'http://dl.dropbox.com/u/2481924/taiwan_ntu_dsi.bvec'\n ureadme = 'http://dl.dropbox.com/u/2481924/license_taiwan_ntu_dsi.txt'\n folder = pjoin(dipy_home, 'taiwan_ntu_dsi')\n\n md5_list = ['950408c0980a7154cb188666a885a91f', # data\n '602e5cb5fad2e7163e8025011d8a6755', # bval\n 'a95eb1be44748c20214dc7aa654f9e6b', # bvec\n '7fa1d5e272533e832cc7453eeba23f44'] # license\n\n url_list = [uraw, ubval, ubvec, ureadme]\n fname_list = ['DSI203.nii.gz', 'DSI203.bval', 'DSI203.bvec', 'DSI203_license.txt']\n\n if not os.path.exists(folder):\n print('Creating new directory %s' % folder)\n os.makedirs(folder)\n print('Downloading raw DSI data (91MB)...')\n\n for i in range(len(md5_list)):\n _get_file_data(pjoin(folder, fname_list[i]), url_list[i])\n check_md5(pjoin(folder, fname_list[i]), md5_list[i])\n\n print('Done.')\n print('Files copied in folder %s' % folder)\n print('See DSI203_license.txt for LICENSE.')\n print('For the complete datasets please visit :')\n print('http://dsi-studio.labsolver.org')\n\n else:\n print('Dataset is already in place. If you want to fetch it again, please first remove the folder %s ' % folder)", "def DirDE():\n\n global Asm\n\n if dec.Asm.Parse_Pointer == 0:\n # No parameter given\n errors.DoError('missoper', False)\n dec.Asm.New_Label = ''\n return\n\n register = -1\n reg = assem.GetWord().upper()\n if (len(reg) == 2 or len(reg) == 3) and reg[0] == 'R':\n # Can it be a register name? 
Must be 2 or 3 chars long and start with R\n reg = reg[1:]\n if reg.isdigit:\n # The register number must be numeric of course\n if len(reg) == 1 or reg[0] != '0':\n # It is numeric, without a leading 0\n register = int(reg)\n if register < 0 or register > 31:\n # It is not a legal register\n errors.DoError('badoper', False)\n dec.Asm.New_Label = ''\n else:\n # It is a legal register, set it's value\n dec.Asm.BOL_Address = register\n dec.Asm.List_Address = register\n dec.Asm.Mnemonic = '.SE' # Handle rest like .SE\n\n # Ignore more parameters this time (like .EQ).", "def kv_esx_init():\n disk_lib_init()", "def test_once(config, qemu_img=False):\n\n iotests.log(\"# ================= %s %s =================\" % (\n \"qemu-img\" if qemu_img else \"dm-crypt\", config))\n\n oneKB = 1024\n oneMB = oneKB * 1024\n oneGB = oneMB * 1024\n oneTB = oneGB * 1024\n\n # 4 TB, so that we pass the 32-bit sector number boundary.\n # Important for testing correctness of some IV generators\n # The files are sparse, so not actually using this much space\n image_size = 4 * oneTB\n if qemu_img:\n iotests.log(\"# Create image\")\n qemu_img_create(config, image_size / oneMB)\n else:\n iotests.log(\"# Create image\")\n create_image(config, image_size / oneMB)\n\n lowOffsetMB = 100\n highOffsetMB = 3 * oneTB / oneMB\n\n try:\n if not qemu_img:\n iotests.log(\"# Format image\")\n cryptsetup_format(config)\n\n for slot in config.active_slots()[1:]:\n iotests.log(\"# Add password slot %s\" % slot)\n cryptsetup_add_password(config, slot)\n\n # First we'll open the image using cryptsetup and write a\n # known pattern of data that we'll then verify with QEMU\n\n iotests.log(\"# Open dev\")\n cryptsetup_open(config)\n\n try:\n iotests.log(\"# Write test pattern 0xa7\")\n qemu_io_write_pattern(config, 0xa7, lowOffsetMB, 10, dev=True)\n iotests.log(\"# Write test pattern 0x13\")\n qemu_io_write_pattern(config, 0x13, highOffsetMB, 10, dev=True)\n finally:\n iotests.log(\"# Close dev\")\n cryptsetup_close(config)\n\n # Ok, now we're using QEMU to verify the pattern just\n # written via dm-crypt\n\n iotests.log(\"# Read test pattern 0xa7\")\n qemu_io_read_pattern(config, 0xa7, lowOffsetMB, 10, dev=False)\n iotests.log(\"# Read test pattern 0x13\")\n qemu_io_read_pattern(config, 0x13, highOffsetMB, 10, dev=False)\n\n\n # Write a new pattern to the image, which we'll later\n # verify with dm-crypt\n iotests.log(\"# Write test pattern 0x91\")\n qemu_io_write_pattern(config, 0x91, lowOffsetMB, 10, dev=False)\n iotests.log(\"# Write test pattern 0x5e\")\n qemu_io_write_pattern(config, 0x5e, highOffsetMB, 10, dev=False)\n\n\n # Now we're opening the image with dm-crypt once more\n # and verifying what QEMU wrote, completing the circle\n iotests.log(\"# Open dev\")\n cryptsetup_open(config)\n\n try:\n iotests.log(\"# Read test pattern 0x91\")\n qemu_io_read_pattern(config, 0x91, lowOffsetMB, 10, dev=True)\n iotests.log(\"# Read test pattern 0x5e\")\n qemu_io_read_pattern(config, 0x5e, highOffsetMB, 10, dev=True)\n finally:\n iotests.log(\"# Close dev\")\n cryptsetup_close(config)\n finally:\n iotests.log(\"# Delete image\")\n delete_image(config)\n print", "def operate_cipher(self):", "def produce_13TeV_template(tag_name=\"HKHI\"):\n num_rebin = 1\n file_name = \"inputs/BkgEstimation_Lin/BkgEstimation_NONE_TOPO_PTDEP_\"+tag_name+\"_Lin.root\"\n print \"Input: \", file_name\n fin = ROOT.TFile.Open(file_name, \"read\")\n h_nom = fin.Get(\"bkg_total_gg_full\").Clone(\"bkg_nominal_old\")\n h_nom.Rebin(num_rebin)\n fout = 
ROOT.TFile.Open(\"hists_input_\"+tag_name+\".root\", \"recreate\")\n\n h_purity_sys = fin.Get(\"bkg_purity_syst_gg_full\").Clone(\"bkg_purity_syst_gg\")\n h_reducible_sys = fin.Get(\"bkg_reducible_syst_gg_full\").Clone(\"bkg_reducible_syst_gg\")\n h_irreducible_sys = fin.Get(\"bkg_irreducible_syst_gg_full\").Clone(\"bkg_irreducible_syst_gg\")\n h_iso_sys = fin.Get(\"bkg_iso_syst_gg_full\").Clone(\"bkg_iso_syst_gg\")\n\n #file_iso = \"isolation_sys/hist.root\"\n #fin2 = ROOT.TFile.Open(file_iso, \"read\")\n #h_iso_sys = fin2.Get(\"bkg_isolation_syst_gg\")\n ## inflat irreducible uncertainty by factor of 10\n # so that it closes to stats uncertainty in data\n sf = 1\n if INFLATE_SYS:\n sf = 10\n\n # after rebinning systematic uncertainties, need to scale down,\n # otherwise the uncertainties are inflated.\n h_purity_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_irreducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_reducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_iso_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n\n ## truncate the histograms to [200, 2000] GeV\n h_nom_new = truncate_hist(h_nom, \"bkg_nominal\")\n h_purity_sys_new = truncate_hist(h_purity_sys, \"h_purity_sys_new\")\n h_irreducible_sys_new = truncate_hist(h_irreducible_sys, \"h_irreducible_sys_new\")\n h_reducible_sys_new = truncate_hist(h_reducible_sys, \"h_reducible_sys_new\")\n h_iso_sys_new = truncate_hist(h_iso_sys, \"h_iso_sys_new\")\n\n #write down sys and nominal\n fout.cd()\n h_nom_new.Write()\n h_purity_sys_new.Write()\n h_reducible_sys_new.Write()\n h_irreducible_sys_new.Write()\n h_iso_sys_new.Write()\n\n h_purity_up, h_purity_down = create_sys_hist(h_nom_new, h_purity_sys_new, \"purity_sys\")\n h_purity_up.Write()\n h_purity_down.Write()\n\n h_red_up, h_red_down = create_sys_hist(h_nom_new, h_reducible_sys_new, \"reducible_sys\")\n h_red_up.Write()\n h_red_down.Write()\n\n h_irred_up, h_irred_down = create_sys_hist(h_nom_new, h_irreducible_sys_new, \"irreducible_sys\")\n h_irred_up.Write()\n h_irred_down.Write()\n\n h_iso_up, h_iso_down = create_sys_hist(h_nom_new, h_iso_sys, \"isolation_sys\")\n h_iso_up.Write()\n h_iso_down.Write()\n\n fin.Close()\n fout.Close()", "def run(filename=\"input.json\", path=\".\", **args):\n\n logger = logging.getLogger(__name__)\n \n #read input file (need to add command line specification)\n logger.info(\"Begin processing input file: %s\" % filename)\n eos_dict, thermo_dict, output_file = read_input.extract_calc_data(filename, path, **args)\n eos_dict['jit'] = args['jit']\n\n if output_file:\n file_dict = {\"output_file\":output_file}\n else:\n file_dict = {\"output_file\": \"despasito_out.txt\"}\n\n logger.debug(\"EOS dict:\", eos_dict)\n logger.debug(\"Thermo dict:\", thermo_dict)\n logger.info(\"Finish processing input file: {}\".format(filename))\n \n eos = eos_mod(**eos_dict)\n \n # Run either parametrization or thermodynamic calculation\n if \"opt_params\" in list(thermo_dict.keys()):\n logger.info(\"Initializing parametrization procedure\")\n output_dict = fit(eos, thermo_dict)\n #output = fit(eos, thermo_dict)\n logger.info(\"Finished parametrization\")\n write_output.writeout_fit_dict(output_dict,eos,**file_dict)\n else:\n logger.info(\"Initializing thermodynamic calculation\")\n output_dict = thermo(eos, thermo_dict)\n logger.info(\"Finished thermodynamic calculation\")\n write_output.writeout_thermo_dict(output_dict,thermo_dict[\"calculation_type\"],**file_dict)" ]
[ "0.51985294", "0.50600356", "0.50008357", "0.49936765", "0.49839947", "0.49602553", "0.49248576", "0.49247414", "0.49126267", "0.48398393", "0.48395008", "0.48108664", "0.47869807", "0.47569895", "0.4744264", "0.47037157", "0.4688601", "0.46353415", "0.46312973", "0.4626369", "0.46131372", "0.45700085", "0.45672718", "0.45496738", "0.4536564", "0.45131868", "0.45089722", "0.4489431", "0.4483289", "0.44781137", "0.44769973", "0.4466658", "0.44649532", "0.44538534", "0.44298974", "0.4412921", "0.44076046", "0.440755", "0.4387576", "0.43838364", "0.4383596", "0.4382418", "0.43727472", "0.43727472", "0.43645084", "0.43507543", "0.43401808", "0.43401292", "0.4339117", "0.4338982", "0.4334919", "0.4333715", "0.43275335", "0.43261197", "0.43259275", "0.43235606", "0.43136737", "0.43069398", "0.43026572", "0.43002772", "0.42953813", "0.42936644", "0.42847776", "0.4277469", "0.4275886", "0.42752773", "0.4270519", "0.42693955", "0.4268867", "0.42680377", "0.4267188", "0.42644227", "0.42636308", "0.42596194", "0.42508385", "0.42458782", "0.4243887", "0.42417216", "0.42367524", "0.42350447", "0.42307627", "0.42268273", "0.4226781", "0.42246976", "0.42157528", "0.4214676", "0.42080277", "0.42047888", "0.4204762", "0.41937786", "0.4184817", "0.41823697", "0.41806543", "0.41770038", "0.41751164", "0.41744587", "0.41680625", "0.4165889", "0.416574", "0.41644755" ]
0.43913084
38
TDE allows you to perform real-time I/O encryption and decryption on data files. Data is encrypted before it is written to disk and is decrypted when it is read from disk into memory. For more information, see [Configure TDE](~~131048~~). > You cannot disable TDE after it is enabled.
async def modify_dbinstance_tde_async( self, request: dds_20151201_models.ModifyDBInstanceTDERequest, ) -> dds_20151201_models.ModifyDBInstanceTDEResponse: runtime = util_models.RuntimeOptions() return await self.modify_dbinstance_tdewith_options_async(request, runtime)
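A minimal, hypothetical usage sketch of the async wrapper above follows. It assumes the standard Alibaba Cloud Python SDK package layout (alibabacloud_dds20151201 and alibabacloud_tea_openapi); the credentials, endpoint, and instance ID are placeholders and are not taken from this dataset.

import asyncio

from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_dds20151201.client import Client
from alibabacloud_dds20151201 import models as dds_20151201_models


async def enable_tde() -> None:
    # Assumed client setup: replace the placeholder credentials and endpoint.
    config = open_api_models.Config(
        access_key_id='<your-access-key-id>',
        access_key_secret='<your-access-key-secret>',
        endpoint='mongodb.aliyuncs.com',
    )
    client = Client(config)

    # Enable TDE on a placeholder instance; TDE cannot be disabled afterwards.
    request = dds_20151201_models.ModifyDBInstanceTDERequest(
        dbinstance_id='dds-bp1xxxxxxxxxxxxx',
        tdestatus='enabled',
    )
    response = await client.modify_dbinstance_tde_async(request)
    print(response.body)


asyncio.run(enable_tde())

The synchronous variant, modify_dbinstance_tde, follows the same pattern without asyncio.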
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n # file = None\n # for arg in sys.argv:\n # if \".txt\" in arg or \".py\" not in arg or \".log\" not in arg:\n # file = arg\n\n file = input(\"Enter a file: \")\n\n file_data = Cryptography()\n file_data.file = file\n\n crypt_type = input(\"Please enter 'E' to encrypt or 'D' to decrypt\\n>> \")\n file_data.crypt_type = crypt_type\n\n crypt_type = \"encrypt\" if crypt_type == 'E' else \"decrypt\"\n\n file_data.crypt_method = file_data.crypt_method\n\n key = input(\"Please enter a key for your data\\n>> \")\n file_data.key = key\n\n print(f\"crypt_method: {file_data.crypt_method}\")\n new_data = file_data.crypt_methods[file_data.crypt_method]()\n\n crypt_methods = defaultdict(str,\n {'C': \"Caesar\",\n 'M': \"Monoalphabetic\",\n 'P': \"Polyalphabetic\"})\n\n if DEBUG is False:\n crypt_method = crypt_methods[file_data.crypt_method]\n new_file_name = f\"{crypt_method}_{crypt_type.capitalize()}ed.txt\"\n logger.info(f\"{type(new_data)}: {new_data}\")\n Cryptography.write(new_file_name, new_data)\n print(f\"Your new {crypt_type}ed file has been created as \" +\n f\"{new_file_name}.\")", "def save_data(self):\n\n #\n # t=self.t[0:-1:self.R].reshape([self.t[0:-1:self.R].shape[0],1])\n\n def deterministic_data():\n t = self.dt * self.tau\n Ueem1 = self.Xeem[:, 0]\n Ueem2 = self.Xeem[:, 1]\n Ueem3 = self.Xeem[:, 2]\n Uem1 = self.Xem[:, 0]\n Uem2 = self.Xem[:, 1]\n Uem3 = self.Xem[:, 2]\n Ustk1 = self.Xstkm[:, 0]\n Ustk2 = self.Xstkm[:, 1]\n Ustk3 = self.Xstkm[:, 2]\n tagPar = np.array([\n 'k = ',\n 'r = ',\n 'T0 = ',\n 'N = ',\n 'R = ',\n 'T = ',\n 'dt = ',\n 'Dt = ',\n 'L = ',\n 'gamma = ',\n 'eta = ',\n 'lambda =',\n 'delta = ',\n 'beta = ',\n 'a = ',\n 'N0 = ',\n 'u = ',\n 'sigma1 = ',\n 'sigma2 = ',\n 'x01 = ',\n 'x02 = ',\n 'x03 = ',\n ])\n ParValues = np.array([\n self.k,\n self.r,\n self.T0,\n self.N,\n self.R,\n self.T,\n self.dt,\n self.Dt,\n self.L,\n self.gamma,\n self.eta,\n self.Lambda,\n self.delta,\n self.beta,\n self.a,\n self.NN,\n self.u,\n self.sigma1,\n self.sigma2,\n self.Xzero[0, 0],\n self.Xzero[0, 1],\n self.Xzero[0, 2]\n ])\n strPrefix = str(self.Dt)\n name1 = 'DetParameters' + strPrefix + '.txt'\n name2 = 'DetSolution' + strPrefix + '.txt'\n name3 = 'DetRefSolution' + str(self.dt) + '.txt'\n\n PARAMETERS = np.column_stack((tagPar, ParValues))\n np.savetxt(name1, PARAMETERS, delimiter=\" \", fmt=\"%s\")\n np.savetxt(name2,\n np.transpose(\n (\n t, Uem1, Uem2, Uem3, Ustk1, Ustk2, Ustk3,\n )\n ), fmt='%1.8f', delimiter='\\t')\n np.savetxt(name3,\n np.transpose(\n (\n self.t, Ueem1, Ueem2, Ueem3,\n )\n ), fmt='%1.8f', delimiter='\\t')\n\n def stochastic_data():\n \"\"\"\n t = self.dt * self.tau\n Ueem1 = self.Xeem[:, 0]\n Ueem2 = self.Xeem[:, 1]\n Ueem3 = self.Xeem[:, 2]\n Uem1 = self.Xem[:, 0]\n Uem2 = self.Xem[:, 1]\n Uem3 = self.Xem[:, 2]\n Ustk1 = self.Xstkm[:, 0]\n Ustk2 = self.Xstkm[:, 1]\n Ustk3 = self.Xstkm[:, 2]\n Utem1 = self.Xtem[:, 0]\n Utem2 = self.Xtem[:, 1]\n Utem3 = self.Xtem[:, 2]\n \"\"\"\n\n tagPar = np.array([\n 'k = ',\n 'r = ',\n 'T0 = ',\n 'N = ',\n 'R = ',\n 'T = ',\n 'dt = ',\n 'Dt = ',\n 'L = ',\n 'gamma = ',\n 'eta = ',\n 'lambda =',\n 'delta = ',\n 'beta = ',\n 'a = ',\n 'N0 = ',\n 'u = ',\n 'sigma1 = ',\n 'sigma2 = ',\n 'x01 = ',\n 'x02 = ',\n 'x03 = ',\n ])\n ParValues = np.array([\n self.k,\n self.r,\n self.T0,\n self.N,\n self.R,\n self.T,\n self.dt,\n self.Dt,\n self.L,\n self.gamma,\n self.eta,\n self.Lambda,\n self.delta,\n self.beta,\n self.a,\n self.NN,\n self.u,\n self.sigma1,\n self.sigma2,\n self.Xzero[0, 0],\n 
self.Xzero[0, 1],\n self.Xzero[0, 2]\n ])\n strPrefix = str(self.Dt)\n name1 = 'StoParameters' + strPrefix + '.txt'\n '''\n name2 = 'StoSolution' + strPrefix + '.txt'\n name3 = 'StoRefSolution' + str(self.dt) + '.txt'\n '''\n PARAMETERS = np.column_stack((tagPar, ParValues))\n np.savetxt(name1, PARAMETERS, delimiter=\" \", fmt=\"%s\")\n '''\n np.save(name2,\n np.transpose(\n (\n t, Uem1, Uem2, Uem3, Ustk1, Ustk2, Ustk3, Utem1,\n Utem2, Utem3\n )\n ))\n np.savetxt(name3,\n np.transpose(\n (\n self.t, Ueem1, Ueem2, Ueem3\n )\n ))\n if self.sigma1 == 0.0:\n if self.sigma2 == 0.0:\n DeterministicData()\n return\n StochasticData()\n '''\n\n return", "def setup_tdd(self,earfcn,bwMhz,powerdBm,ud_config,special_sf_config=0,ul_timing_advance=0,with_rx=False):\r\r\n\r\r\n self.setup_modem()\r\r\n self.instr.setup_4g_tx_test(cable_loss_dB=self.testConfig.cable_loss)\r\r\n self.teststep_idx = 0\r\r\n band,freq_ul,freq_dl = lte_util.get_lte_ul_dl_freq_band(earfcn)\r\r\n\r\r\n self.set_band(band=band)\r\r\n self.modemObj.set_rat_band(rat='LTE', band=band)\r\r\n duplex_mode = self.get_duplex_mode()\r\r\n assert(duplex_mode == \"TDD\")\r\r\n self.instr.lte_tx.set_duplex_mode(duplex_mode=duplex_mode)\r\r\n self.instr.lte_tx.set_band(band=band)\r\r\n self.modemObj.set_freqMHz(freqMHz=freq_ul)\r\r\n self.instr.lte_tx.set_rf_freqMHz(freqMHz=freq_ul)\r\r\n self.set_bw(bwMHz=bwMhz)\r\r\n rf_config = LTE_rf_config(bwMHz=bwMhz)\r\r\n self.modemObj.set_rb(direction='ul', num_rb=rf_config.num_rbs)\r\r\n self.modemObj.set_rb(direction='dl', num_rb=rf_config.num_rbs)\r\r\n self.modemObj.set_rb_start(rb_offset=rf_config.rb_offset)\r\r\n self.modemObj.set_rb_len(rb_len=rf_config.rb_len)\r\r\n rf_config.check_config()\r\r\n self.instr.lte_tx.set_channel_bw_MHz(bwMHz=bwMhz)\r\r\n self.modemObj.send_ul_pattern()\r\r\n\r\r\n self.set_ud_config(ud_config)\r\r\n self.modemObj.set_ud_config(ud_config)\r\r\n self.instr.lte_tx.set_ul_dl_conf(ud_config)\r\r\n\r\r\n self.modemObj.enable_tx()\r\r\n\r\r\n bursted = not (ud_config==\"TEST0\" or ud_config==\"TEST1\")\r\r\n self.setup_tdd_trigger(bursted,special_sf_config)\r\r\n\r\r\n self.modemObj.set_special_sf_config(special_sf_config)\r\r\n self.instr.lte_tx.set_special_subframe_conf(special_sf_config)\r\r\n\r\r\n self.modemObj.set_ul_timing_advance(ul_timing_advance)\r\r\n\r\r\n meas_sf = 2\r\r\n self.set_meas_sf(meas_sf)\r\r\n self.instr.lte_tx.conf_measurement_subframe(measSubframe=meas_sf)\r\r\n\r\r\n self.modemObj.set_txagc_dbm(value=powerdBm)\r\r\n self.instr.lte_tx.set_rf_exp_power(power_dBm=powerdBm+5)\r\r\n self.instr.waitForCompletion()\r\r\n\r\r\n if with_rx:\r\r\n assert(bursted)\r\r\n self.modemObj.set_freqMHz(direction='rx',freqMHz=freq_dl)\r\r\n self.modemObj.set_rxagc_auto(ant='m')\r\r\n self.modemObj.enable_rx(ant='m')\r\r\n\r\r\n self.set_test_afc_val()\r\r\n\r\r\n return bursted", "def main():\n\n # performs crib dragging using initial values\n plaintext1, plaintext2 = crib_drag('', '', 0, 0)\n\n if plaintext1 is None or plaintext2 is None:\n print('No possible English decryption using the current dictionary')\n return\n\n # find the key and creates file with results\n plaintext1 = plaintext1[:CIPHER_LEN]\n plaintext2 = plaintext2[:CIPHER_LEN]\n key = find_key(plaintext1, plaintext2)\n\n with open('plaintext1.txt', 'w') as plain_file:\n plain_file.write(plaintext1)\n with open('plaintext2.txt', 'w') as plain_file:\n plain_file.write(plaintext2)\n with open('key.txt', 'wb') as plain_file:\n plain_file.write(key)", "def TestTDDFT():\n prm = '''\n Model\tTDHF\n 
Method\tMMUT\n dt\t0.02\n MaxIter\t100\n ExDir\t1.0\n EyDir\t1.0\n EzDir\t1.0\n FieldAmplitude\t0.01\n FieldFreq\t0.9202\n ApplyImpulse\t1\n ApplyCw\t\t0\n StatusEvery\t10\n '''\n geom = \"\"\"\n H 0. 0. 0.\n H 0. 0. 0.9\n H 2.0 0. 0\n H 2.0 0.9 0\n \"\"\"\n output = re.sub(\"py\",\"dat\",sys.argv[0])\n mol = gto.Mole()\n mol.atom = geom\n mol.basis = 'sto-3g'\n mol.build()\n the_scf = pyscf.dft.RKS(mol)\n the_scf.xc='HF'\n print \"Inital SCF finished. E=\", the_scf.kernel()\n aprop = tdscf.tdscf(the_scf,prm,output)\n return", "def modify_dbinstance_tdewith_options(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.encryption_key):\n query['EncryptionKey'] = request.encryption_key\n if not UtilClient.is_unset(request.encryptor_name):\n query['EncryptorName'] = request.encryptor_name\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_arn):\n query['RoleARN'] = request.role_arn\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.tdestatus):\n query['TDEStatus'] = request.tdestatus\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ModifyDBInstanceTDE',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ModifyDBInstanceTDEResponse(),\n self.call_api(params, req, runtime)\n )", "def test_single_dft():\n test_file = os.path.join(DATA_DIR, 'test39_dft.out')\n parser = CRYSTOUT(test_file)\n info = parser.info\n assert info['finished'] == 2 # finished without errors\n assert info['energy'] == -4.8538264773648E+02 * Ha # energy in eV\n assert info['k'] == '6x6x6' # Monkhorst-Pack net\n assert info['H'] == \"LDA/PZ_LDA\"\n assert info['ncycles'][0] == 9\n assert info['electrons']['basis_set']['ecp']['Ge'][0][1] == (0.82751, -1.26859, -1)\n assert info['electrons']['basis_set']['bs']['Ge'][0][1] == (1.834, 0.4939, 0.006414)", "def enable_dual_TTS(self):\n\t\t\n\t\tself.isActiveDualTTS = True\n\t\tself.change_TTS_engine()", "def _disable_encryption(self):\n # () -> None\n self.encrypt = self._disabled_encrypt\n self.decrypt = self._disabled_decrypt", "def tdd():\n\n with lcd(FRONTENDDIR):\n cmd = '%(gulp)s tdd' % {'gulp': get_gulp()}\n local(cmd)", "def crypto_test(tdesc, tpm):\n node_name = tdesc.get('name')\n key = get_attribute(tdesc, 'key')\n if len(key) not in (16, 24, 32):\n raise subcmd.TpmTestError('wrong key size \"%s:%s\"' % (\n node_name,\n ''.join('%2.2x' % ord(x) for x in key)))\n iv = get_attribute(tdesc, 'iv', required=False)\n if iv and len(iv) != 16:\n raise subcmd.TpmTestError('wrong iv size \"%s:%s\"' % (\n node_name,\n ''.join('%2.2x' % ord(x) for x in iv)))\n 
clear_text = get_attribute(tdesc, 'clear_text')\n if tpm.debug_enabled():\n print('clear text size', len(clear_text))\n cipher_text = get_attribute(tdesc, 'cipher_text', required=False)\n real_cipher_text = crypto_run(node_name, ENCRYPT, key, iv,\n clear_text, cipher_text, tpm)\n crypto_run(node_name, DECRYPT, key, iv, real_cipher_text,\n clear_text, tpm)\n print(utils.cursor_back() + 'SUCCESS: %s' % node_name)", "async def modify_dbinstance_tdewith_options_async(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.encryption_key):\n query['EncryptionKey'] = request.encryption_key\n if not UtilClient.is_unset(request.encryptor_name):\n query['EncryptorName'] = request.encryptor_name\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_arn):\n query['RoleARN'] = request.role_arn\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.tdestatus):\n query['TDEStatus'] = request.tdestatus\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ModifyDBInstanceTDE',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ModifyDBInstanceTDEResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def disable_dual_TTS(self):\n\t\t\n\t\tself.isActiveDualTTS = False\n\t\tself.change_TTS_engine()", "def main(ctx, access_key, host, debug):\n info = {\n \"access_key\": access_key,\n \"host\": host,\n \"DEBUG\": debug\n }\n\n _tda = None\n\n if access_key == \"\":\n configFile = _config_filepath()\n if os.path.exists(configFile):\n with open(configFile, \"r\", encoding=\"utf-8\") as cf:\n if cf.read() != \"\":\n info = _getConf()\n info[\"DEBUG\"] = debug\n\n if info[\"access_key\"] != \"\":\n _tda = TDA(info[\"access_key\"], info[\"host\"])\n if info[\"DEBUG\"]:\n _tda.Debug()\n\n ctx.obj = _tda", "def _get_sql_db_tde_disabled_event(com, ext):\n friendly_cloud_type = util.friendly_string(com.get('cloud_type'))\n reference = com.get('reference')\n description = (\n '{} SQL DB {} has TDE disabled.'\n .format(friendly_cloud_type, reference)\n )\n recommendation = (\n 'Check {} SQL DB {} and enable TDE.'\n .format(friendly_cloud_type, reference)\n )\n event_record = {\n # Preserve the extended properties from the virtual\n # machine record because they provide useful context to\n # locate the virtual machine that led to the event.\n 'ext': util.merge_dicts(ext, {\n 'record_type': 'sql_db_tde_event'\n }),\n 'com': {\n 'cloud_type': com.get('cloud_type'),\n 'record_type': 'sql_db_tde_event',\n 'reference': reference,\n 'description': description,\n 'recommendation': recommendation,\n }\n 
}\n\n _log.info('Generating sql_db_tde_event; %r', event_record)\n yield event_record", "def encrypt():\n print(\"Use sops to encrypt the file.\")\n print(\"Learn more at https://github.com/mozilla/sops\")", "def test_tte4(self):\n filename = str(self.temp_j2k_filename)\n xtx4_setup(filename)\n self.assertTrue(True)", "def test_endecrypt():\n\n e, d, c = keygen()\n\n test_encryption(e, c)\n test_decryption(d, c)\n key_cracker(e, c)", "def test_pert_file(self):\n path, case = os.path.split(self.ieee14)\n\n # --- with pert file ---\n ss = andes.run('ieee14.raw', pert='pert.py',\n input_path=path, no_output=True, default_config=True,\n )\n ss.TDS.init()\n self.assertIsNotNone(ss.TDS.callpert)\n\n # --- without pert file ---\n ss = andes.run('ieee14.raw',\n input_path=path, no_output=True, default_config=True,\n )\n ss.TDS.init()\n self.assertIsNone(ss.TDS.callpert)", "def decrypt_text_file(self):\r\n\t\t#Ensures that the file has something that can be decrypted.\r\n\t\tfile_contains_message = True\r\n\t\twhile file_contains_message:\r\n\t\t\tfile_exists = True\r\n\t\t\t#Checks to see if the file exists.\r\n\t\t\twhile file_exists:\r\n\t\t\t\tself.text_file_name = input(\"Please enter the name of the text file you wish to decrypt in format |file_name.txt|.--> \")\r\n\t\t\t\tif \".txt\" in self.text_file_name:\r\n\t\t\t\t\tfile_exists = Doc_Control().check_for_file(self.text_file_name)\r\n\t\t\t\telse: \r\n\t\t\t\t\tcontinue\r\n\t\t\t#Decrypts message but verifys correct key before giving user their decrypted message.\r\n\t\t\twhile True: \r\n\t\t\t\tself.message = Doc_Control().open_file(self.text_file_name)\r\n\t\t\t\tif self.message != \"\" and len(self.message) > 4:\r\n\t\t\t\t\tfile_contains_message = False\r\n\t\t\t\t\tbreak\r\n\t\t\t\telse:\r\n\t\t\t\t\tprint(\"Your file does not contain an encryptable message.\")\r\n\t\t\t\t\tbreak\r\n\t\tself.right_key = True\r\n\t\twhile self.right_key:\r\n\t\t\tself.setup_key_decrypt()\r\n\t\t\tself.my_code = Decryptor(self.message, self.key).transfer_decrypt()\r\n\t\t\tself.verify_decrypt_key()\r\n\t\tself.output_file = Doc_Control().assign_output_file()\r\n\t\toutput_file_obj = open(self.output_file, 'w')\r\n\t\toutput_file_obj.write(self.my_code)\r\n\t\toutput_file_obj.close()\t\t\r\n\t\tprint(\"\\nYour file has been decrypted.\")", "def decrypt(self,timerPrinting=False):\n\n\t\tt = time.time() \n\t\tif self.extension in self.path:\n\t\t\twith open(self.path,'rb') as infile:\n\t\t\t\tfile_data = infile.read()\n\t\t\t# #Start Checking the Platform\n\t\t\t# if platform.system() == 'Windows':\n\t\t\t# \tself.path = self.path.split('\\\\')[-1]\n\t\t\t# elif platform.system() == 'Linux':\n\t\t\t# \tself.path = self.path.split('/')[-1]\n\t\t\t# # END Checking\n\t\t\t# print('Decryption of '+ self.path +\"...\")\n\t\t\t######################### Blowfish Decryption Algorithm ###############\n\t\t\tbs = Blowfish.block_size\n\t\t\trealData = base64.b64decode(file_data)[8:]\n\t\t\tiv = base64.b64decode(file_data)[:8]\n\t\t\tdecrypt = Blowfish.new(self.key, Blowfish.MODE_CBC, iv)\n\t\t\tself.decrypt = decrypt.decrypt(realData)\n\t\t\t########################### End Blowfish #########################\n\t\t\t#print('Writing in your file...')\n\t\t\tself.out = self.path.replace(self.extension,'')\n\t\t\tos.remove(self.path)\n\t\t\twith open(self.out,'wb') as outfile:\n\t\t\t\toutfile.write(self.decrypt)\n\t\t\tif timerPrinting:\n\t\t\t\tprint(\"Done in \",time.time() - t)\n\t\t\t\n\t\telse:\n\t\t\tprint('The File is Not Encrypted To Decrypted.')", "def 
get_TTS_data(self, exchanged_token, exchange=False):\n if os.path.exists(self.lock_file):\n ctime = os.stat(self.lock_file).st_ctime\n age = time.time() - ctime\n if age < self.age:\n self.log.error(\"Update already in progres. Sleeping ..\")\n time.sleep(self.age - age)\n else:\n self.log.error(\"Stale lock file, removing ...\")\n os.remove(self.lock_file)\n open(self.lock_file, 'w+').close()\n\n if exchange:\n with file('/tmp/refresh_token') as f:\n refresh_token = f.read()\n self.exchanged_token = self.refresh_token(self.client_id, self.client_secret, refresh_token.strip())\n if isinstance(self.exchanged_token, int):\n self.log.error(\"refresh_token error\")\n\n if self.get_certificate(self.credential_endpoint):\n # load json and prepare objects\n with open('/tmp/output.json') as tts_data_file:\n tts_data = json.load(tts_data_file)\n \n f = open(self.user_cert, 'w+')\n f.write(str(tts_data['credential']['entries'][0]['value']))\n f.close()\n \n f = open(self.user_key, 'w+')\n f.write(str(tts_data['credential']['entries'][1]['value']))\n f.close()\n \n f = open(self.user_passwd, 'w+')\n f.write(str(tts_data['credential']['entries'][2]['value']))\n f.close()\n \n try:\n os.chmod(self.user_key, 0600)\n except OSError, e:\n self.log.error(e)\n self.log.error(\"Permission denied to chmod passwd file\")\n return False\n \n os.remove(self.lock_file)\n \n return True\n else:\n return False", "def read_taiwan_ntu_dsi():\n dipy_home = pjoin(os.path.expanduser('~'), '.dipy')\n folder = pjoin(dipy_home, 'taiwan_ntu_dsi')\n fraw = pjoin(folder, 'DSI203.nii.gz')\n fbval = pjoin(folder, 'DSI203.bval')\n fbvec = pjoin(folder, 'DSI203.bvec')\n md5_dict = {'data': '950408c0980a7154cb188666a885a91f',\n 'bval': '602e5cb5fad2e7163e8025011d8a6755',\n 'bvec': 'a95eb1be44748c20214dc7aa654f9e6b',\n 'license': '7fa1d5e272533e832cc7453eeba23f44'}\n\n check_md5(fraw, md5_dict['data'])\n check_md5(fbval, md5_dict['bval'])\n check_md5(fbvec, md5_dict['bvec'])\n check_md5(pjoin(folder, 'DSI203_license.txt'), md5_dict['license'])\n\n bvals, bvecs = read_bvals_bvecs(fbval, fbvec)\n bvecs[1:] = bvecs[1:] / np.sqrt(np.sum(bvecs[1:] * bvecs[1:], axis=1))[:, None]\n\n gtab = gradient_table(bvals, bvecs)\n img = nib.load(fraw)\n return img, gtab", "def decrypt(data):\n # Decrypt data if necessary\n result = None\n if str(data[:5]) == \"<?xml\":\n print(\" - Unprotected CETRAINER detected\")\n result = data\n else:\n print(\" - Protected CETRAINER detected. 
Decrypting...\")\n ckey = 0xCE\n for i in range(2, len(data)):\n data[i] = data[i] ^ data[i-2]\n for i in range(len(data)-2, -1, -1):\n data[i] = data[i] ^ data[i+1]\n for i in range(0, len(data)):\n data[i] = data[i] ^ ckey\n ckey = (ckey + 1) & 0xFF\n\n # Decompress if necessary and write data\n if data[:5] == b'CHEAT':\n result = zlib.decompress(data[5:], -15)\n result = result[4:]\n print(\" - Decompressed CETRAINER using new method\")\n else:\n result = zlib.decompress(data, -15)\n print(\" - Decompressed CETRAINER using old method\")\n return result", "def decrypt(path, key, default, output, url, token, vaultpath):\n if not key:\n key = getpass('Encryption key: ')\n\n path, file_type, file_mtime = get_file_type_and_mtime(path)\n data = get_config(path, file_type, default=False)\n data = decrypt_credentials(data, key)\n\n # Only merge the DEFAULT section after decrypting.\n if default:\n data = merge_default(data)\n\n if url:\n try:\n import hvac\n except:\n print('''\nTo use Hashicorp's Vault you must install the hvac package.\nTo install it try using the following command:\n\n pip install hvac\n''')\n exit(3)\n\n if not token:\n token = os.environ.get('VAULT_TOKEN', '')\n if not token:\n token = getpass('Vault token: ')\n \n client = hvac.Client(url=url, token=token)\n if not vaultpath:\n vaultpath = path\n\n if vaultpath[0] == '~':\n vaultpath = vaultpath[1:]\n if vaultpath[0] == '.':\n vaultpath = vaultpath[1:]\n if vaultpath[0] == '.':\n vaultpath = vaultpath[1:]\n if vaultpath[0] == '/':\n vaultpath = vaultpath[1:]\n\n data = merge_default(data)\n for heading in data:\n # kargs = { heading: json.dumps(data[heading]) }\n client.write(vaultpath + '/' + heading, **data[heading])\n\n else:\n\n if output:\n if output[0] == '.':\n output = output[1:]\n file_type = '.' 
+ output.lower()\n\n with open(path + file_type, 'w') as save_file:\n if file_type == '.json':\n json.dump(data, save_file, indent=2)\n\n elif file_type in {'.ini', '.conf'}:\n if default:\n default_section = 'DEFAULT'\n else:\n default_section = 'DEFAULT' + os.urandom(16).hex()\n config_ini = configparser.ConfigParser(\n dict_type=OrderedDict,\n default_section=default_section,\n interpolation=None)\n for heading in data:\n config_ini.add_section(heading)\n for item in data[heading]:\n config_ini.set(heading, item, data[heading][item])\n config_ini.write(save_file)\n\n else:\n write_yaml(save_file, data)", "def showTF(tf,outDir):\n\n nlo2lo,data2lo,data2nlo,data2lo_A,data2nlo_A=tf\n\n c=ROOT.TCanvas('c','c',500,500)\n c.SetBottomMargin(0)\n c.SetTopMargin(0)\n c.SetLeftMargin(0)\n c.SetRightMargin(0)\n c.cd()\n\n p1=ROOT.TPad('p1','p1',0,0.5,1,1.0)\n p1.Draw()\n p1.SetRightMargin(0.03)\n p1.SetLeftMargin(0.12)\n p1.SetTopMargin(0.1)\n p1.SetBottomMargin(0.01)\n p1.SetGridy()\n p1.cd()\n nlo2lo.Draw('e2')\n nlo2lo.GetYaxis().SetTitle('Z ratio')\n nlo2lo.GetYaxis().SetNdivisions(5)\n nlo2lo.GetXaxis().SetTitleSize(0)\n nlo2lo.GetXaxis().SetLabelSize(0)\n nlo2lo.GetYaxis().SetTitleSize(0.08)\n nlo2lo.GetYaxis().SetTitleOffset(0.8)\n nlo2lo.GetYaxis().SetLabelSize(0.08)\n nlo2lo.GetYaxis().SetRangeUser(0.01,1.94)\n data2lo.Draw('e1same')\n data2nlo.Draw('e1same')\n\n leg1=p1.BuildLegend(0.7,0.88,0.95,0.66)\n leg1.SetFillStyle(0)\n leg1.SetBorderSize(0)\n leg1.SetTextFont(42)\n leg1.SetTextSize(0.06)\n\n l1=ROOT.TLine()\n l1.SetLineWidth(2)\n l1.SetLineColor(ROOT.kBlue)\n l1.DrawLine(data2lo.GetXaxis().GetXmin(),1,data2lo.GetXaxis().GetXmax(),1)\n\n txt=ROOT.TLatex()\n txt.SetNDC(True)\n txt.SetTextFont(42)\n txt.SetTextSize(0.08)\n txt.SetTextAlign(12)\n txt.DrawLatex(0.12,0.95,'#bf{CMS} #it{preliminary}')\n p1.RedrawAxis()\n\n c.cd()\n p2=ROOT.TPad('p2','p2',0,0,1,0.5)\n p2.SetRightMargin(0.03)\n p2.SetLeftMargin(0.12)\n p2.SetTopMargin(0.01)\n p2.SetBottomMargin(0.18)\n p2.SetGridy()\n p2.Draw()\n p2.cd()\n data2lo_A.Draw('e1')\n data2lo_A.GetYaxis().SetTitle('#gamma ratio')\n data2lo_A.GetYaxis().SetNdivisions(5)\n data2lo_A.GetYaxis().SetRangeUser(0.01,1.94)\n data2lo_A.GetXaxis().SetTitleSize(0.08)\n data2lo_A.GetXaxis().SetLabelSize(0.08)\n data2lo_A.GetYaxis().SetTitleSize(0.08)\n data2lo_A.GetYaxis().SetLabelSize(0.08)\n data2lo_A.GetYaxis().SetTitleOffset(0.8)\n data2nlo_A.Draw('e1same')\n \n leg2=p2.BuildLegend(0.7,0.94,0.95,0.80)\n leg2.SetFillStyle(0)\n leg2.SetBorderSize(0)\n leg2.SetTextFont(42)\n leg2.SetTextSize(0.06)\n \n l2=ROOT.TLine()\n l2.SetLineColor(ROOT.kBlue)\n l2.SetLineWidth(2)\n l2.DrawLine(data2lo_A.GetXaxis().GetXmin(),1,data2lo_A.GetXaxis().GetXmax(),1)\n\n p2.RedrawAxis()\n\n c.cd()\n c.Modified()\n c.Update()\n for ext in ['png','pdf']:\n c.SaveAs('{0}.{1}'.format(outDir,ext))", "def enable_tee(self):\n self._tee = True", "def test_simulation_persistence(compression, tmp_path):\n path = tmp_path / \"test_simulation_persistence.hdf5\"\n storage = FileStorage(path, compression=compression)\n\n # write some simulation data\n pde = DiffusionPDE()\n grid = UnitGrid([16, 16]) # generate grid\n state = ScalarField.random_uniform(grid, 0.2, 0.3)\n pde.solve(state, t_range=0.11, dt=0.001, tracker=storage.tracker(interval=0.05))\n storage.close()\n\n # read the data\n storage = FileStorage(path)\n np.testing.assert_almost_equal(storage.times, [0, 0.05, 0.1])\n data = np.array(storage.data)\n assert data.shape == (3,) + state.data.shape\n grid_res = storage.grid\n assert 
grid == grid_res\n grid_res = storage.grid\n assert grid == grid_res", "def louder():\n try:\n ttsEng.louder()\n except Exception, e:\n logging.error(e)", "def write_tde(table_df, tde_fullpath, arg_append):\n if arg_append and not os.path.isfile(tde_fullpath):\n print \"Couldn't append -- file doesn't exist\"\n arg_append = False\n\n # Remove it if already exists\n if not arg_append and os.path.exists(tde_fullpath):\n os.remove(tde_fullpath)\n tdefile = tde.Extract(tde_fullpath)\n\n # define the table definition\n table_def = tde.TableDefinition()\n \n # create a list of column names\n colnames = table_df.columns\n # create a list of column types\n coltypes = table_df.dtypes\n\n # for each column, add the appropriate info the Table Definition\n for col_idx in range(0, len(colnames)):\n cname = colnames[col_idx]\n ctype = fieldMap[str(coltypes[col_idx])]\n table_def.addColumn(cname, ctype) \n\n # create the extract from the Table Definition\n if arg_append:\n tde_table = tdefile.openTable('Extract')\n else:\n tde_table = tdefile.addTable('Extract', table_def)\n row = tde.Row(table_def)\n\n for r in range(0, table_df.shape[0]):\n for c in range(0, len(coltypes)):\n if str(coltypes[c]) == 'float64':\n row.setDouble(c, table_df.iloc[r,c])\n elif str(coltypes[c]) == 'float32':\n row.setDouble(c, table_df.iloc[r,c])\n elif str(coltypes[c]) == 'int64':\n row.setDouble(c, table_df.iloc[r,c]) \n elif str(coltypes[c]) == 'int32':\n row.setDouble(c, table_df.iloc[r,c])\n elif str(coltypes[c]) == 'object':\n row.setString(c, table_df.iloc[r,c]) \n elif str(coltypes[c]) == 'bool':\n row.setBoolean(c, table_df.iloc[r,c])\n else:\n row.setNull(c)\n # insert the row\n tde_table.insert(row)\n\n tdefile.close()\n print \"Wrote %d lines to %s\" % (len(table_df), tde_fullpath)", "def decrypt(self, in_, out):\n try:\n # Bytes read from in will be decrypted\n \n out.write(pyDes.des.decrypt(in_.read()))\n # Read in the decrypted bytes and write the cleartext to out\n out.close()\n except Exception as e:\n print e\n pass", "def tesselate(options):\n if not options.freplace:\n if len(options.args) != 2:\n raise TelemacException(\\\n '\\nThe code \"tessellate\" here '\n 'requires one i2s/i3s file and '\n 'one output slf file\\n')\n i3s_file = options.args[0]\n out_file = options.args[1]\n else:\n if len(options.args) != 1:\n raise TelemacException(\\\n '\\nThe code \"tessellate\" here '\n 'requires one i2s/i3s file\\n')\n i3s_file = options.args[0]\n head, _ = path.splitext(i3s_file)\n out_file = head+'.slf'\n\n i3s_file = path.realpath(i3s_file)\n if not path.exists(i3s_file):\n raise TelemacException(\\\n '\\nCould not find '\n 'the file named: {}'.format(i3s_file))\n\n print('\\n\\nTessellating ' + path.basename(i3s_file) + ' within ' + \\\n path.dirname(i3s_file) + '\\n'+'~'*72+'\\n')\n i2s = InS(i3s_file)\n ikle2, ipob2, meshx, meshy = tessellate_poly(i2s, debug=True)\n\n print('\\n\\nWriting down the Selafin file ' + \\\n path.basename(out_file) + '\\n'+'~'*72+'\\n')\n slf = Selafin('')\n slf.title = ''\n slf.nplan = 1\n slf.ndp2 = 3\n slf.ndp3 = 3\n slf.nbv1 = 1\n slf.nvar = 1\n slf.varindex = 1\n slf.varnames = ['BOTTOM ']\n slf.varunits = ['M ']\n slf.ikle2 = ikle2\n slf.ikle3 = slf.ikle2\n slf.meshx = meshx\n slf.meshy = meshy\n slf.npoin2 = i2s.npoin\n slf.npoin3 = slf.npoin2\n slf.nelem2 = len(slf.ikle2)/slf.ndp3\n slf.nelem3 = slf.nelem2\n slf.iparam = [0, 0, 0, 0, 0, 0, 1, 0, 0, 0]\n slf.ipob2 = ipob2\n slf.ipob3 = slf.ipob2\n slf.fole = {'hook':open(out_file, 'wb'), 'endian':\">\",\n 'float':('f', 
4), 'name':out_file}\n slf.tags['times'] = [1]\n if options.sph2ll != None:\n radius = 6371000.\n long0, lat0 = options.sph2ll.split(\":\")\n long0 = np.deg2rad(float(long0))\n lat0 = np.deg2rad(float(lat0))\n const = np.tan(lat0/2. + np.pi/4.)\n slf.meshx = np.rad2deg(slf.meshx/radius + long0)\n slf.meshy = np.rad2deg(2.*np.arctan(const*np.exp(slf.meshy/radius)) \\\n - np.pi/2.)\n if options.ll2sph != None:\n radius = 6371000.\n long0, lat0 = options.ll2sph.split(\":\")\n long0 = np.deg2rad(float(long0))\n lat0 = np.deg2rad(float(lat0))\n slf.meshx = radius * (np.deg2rad(slf.meshx) - long0)\n slf.meshy = radius * \\\n (np.log(np.tan(np.deg2rad(slf.meshy)/2. + np.pi/4.)) \\\n - np.log(np.tan(lat0/2. + np.pi/4.)))\n if options.ll2utm != None:\n zone = int(options.ll2utm)\n slf.meshx, slf.meshy, zone = utm.from_lat_long(slf.meshx, slf.meshy,\n zone)\n if options.utm2ll != None:\n zone = int(options.utm2ll)\n slf.meshx, slf.meshy = utm.to_lat_long(slf.meshx, slf.meshy, zone)\n slf.append_header_slf()\n slf.append_core_time_slf(0)\n slf.append_core_vars_slf([np.zeros(slf.npoin2)])\n slf.fole['hook'].close()", "def decrypt(self, data):", "def createTripleDES(key, IV, implList=None):\r\n if implList == None:\r\n implList = [\"openssl\", \"pycrypto\"]\r\n\r\n for impl in implList:\r\n if impl == \"openssl\" and cryptomath.m2cryptoLoaded:\r\n return openssl_tripledes.new(key, 2, IV)\r\n elif impl == \"pycrypto\" and cryptomath.pycryptoLoaded:\r\n return pycrypto_tripledes.new(key, 2, IV)\r\n raise NotImplementedError()", "def _disabled_decrypt(self, *args, **kwargs):\n raise NotImplementedError('\"decrypt\" is not supported by the \"{}\" algorithm'.format(self.java_name))", "def test(DATASET=\"Texas\", CONFIG=None):\n if CONFIG is None:\n CONFIG = get_config_kACE(DATASET)\n print(f\"Loading {DATASET} data\")\n x_im, y_im, EVALUATE, (C_X, C_Y) = datasets.fetch(DATASET, **CONFIG)\n if tf.config.list_physical_devices(\"GPU\") and not CONFIG[\"debug\"]:\n C_CODE = 3\n print(\"here\")\n TRANSLATION_SPEC = {\n \"enc_X\": {\"input_chs\": C_X, \"filter_spec\": [50, 50, C_CODE]},\n \"enc_Y\": {\"input_chs\": C_Y, \"filter_spec\": [50, 50, C_CODE]},\n \"dec_X\": {\"input_chs\": C_CODE, \"filter_spec\": [50, 50, C_X]},\n \"dec_Y\": {\"input_chs\": C_CODE, \"filter_spec\": [50, 50, C_Y]},\n }\n else:\n print(\"why here?\")\n C_CODE = 1\n TRANSLATION_SPEC = {\n \"enc_X\": {\"input_chs\": C_X, \"filter_spec\": [C_CODE]},\n \"enc_Y\": {\"input_chs\": C_Y, \"filter_spec\": [C_CODE]},\n \"dec_X\": {\"input_chs\": C_CODE, \"filter_spec\": [C_X]},\n \"dec_Y\": {\"input_chs\": C_CODE, \"filter_spec\": [C_Y]},\n }\n print(\"Change Detector Init\")\n cd = Kern_AceNet(TRANSLATION_SPEC, **CONFIG)\n print(\"Training\")\n training_time = 0\n cross_loss_weight = tf.expand_dims(tf.zeros(x_im.shape[:-1], dtype=tf.float32), -1)\n for epochs in CONFIG[\"list_epochs\"]:\n CONFIG.update(epochs=epochs)\n tr_gen, dtypes, shapes = datasets._training_data_generator(\n x_im[0], y_im[0], cross_loss_weight[0], CONFIG[\"patch_size\"]\n )\n TRAIN = tf.data.Dataset.from_generator(tr_gen, dtypes, shapes)\n TRAIN = TRAIN.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\n tr_time, _ = cd.train(TRAIN, evaluation_dataset=EVALUATE, **CONFIG)\n for x, y, _ in EVALUATE.batch(1):\n alpha = cd([x, y])\n cross_loss_weight = 1.0 - alpha\n training_time += tr_time\n\n cd.load_all_weights(cd.log_path)\n cd.final_evaluate(EVALUATE, **CONFIG)\n metrics = {}\n for key in list(cd.difference_img_metrics.keys()) + list(\n cd.change_map_metrics.keys()\n 
):\n metrics[key] = cd.metrics_history[key][-1]\n metrics[\"F1\"] = metrics[\"TP\"] / (\n metrics[\"TP\"] + 0.5 * (metrics[\"FP\"] + metrics[\"FN\"])\n )\n timestamp = cd.timestamp\n epoch = cd.epoch.numpy()\n speed = (epoch, training_time, timestamp)\n del cd\n gc.collect()\n return metrics, speed", "def crypto_run(node_name, op_type, key, iv, in_text, out_text, tpm):\n mode_name, submode_name = node_name.split(':')\n submode_name = submode_name[:3].upper()\n\n mode = SUPPORTED_MODES.get(mode_name.upper())\n if not mode:\n raise subcmd.TpmTestError('unrecognizable mode in node \"%s\"' % node_name)\n\n submode = mode.submodes.get(submode_name, 0)\n cmd = '%c' % op_type # Encrypt or decrypt\n cmd += '%c' % submode # A particular type of a generic algorithm.\n cmd += '%c' % len(key)\n cmd += key\n cmd += '%c' % len(iv)\n if iv:\n cmd += iv\n cmd += struct.pack('>H', len(in_text))\n cmd += in_text\n if tpm.debug_enabled():\n print('%d:%d cmd size' % (op_type, mode.subcmd),\n len(cmd), utils.hex_dump(cmd))\n wrapped_response = tpm.command(tpm.wrap_ext_command(mode.subcmd, cmd))\n real_out_text = tpm.unwrap_ext_response(mode.subcmd, wrapped_response)\n if out_text:\n if len(real_out_text) > len(out_text):\n real_out_text = real_out_text[:len(out_text)] # Ignore padding\n if real_out_text != out_text:\n if tpm.debug_enabled():\n print('Out text mismatch in node %s:\\n' % node_name)\n else:\n raise subcmd.TpmTestError(\n 'Out text mismatch in node %s, operation %d:\\n'\n 'In text:%sExpected out text:%sReal out text:%s' % (\n node_name, op_type,\n utils.hex_dump(in_text),\n utils.hex_dump(out_text),\n utils.hex_dump(real_out_text)))\n return real_out_text", "def modify_dbinstance_tde(\n self,\n request: dds_20151201_models.ModifyDBInstanceTDERequest,\n ) -> dds_20151201_models.ModifyDBInstanceTDEResponse:\n runtime = util_models.RuntimeOptions()\n return self.modify_dbinstance_tdewith_options(request, runtime)", "def enableDestruction(self):\n self.destructable = True", "def SPIjedec(self):\n data=[0x9f, 0, 0, 0];\n data=self.SPItrans(data);\n jedec=0;\n self.JEDECmanufacturer=ord(data[1]);\n if self.JEDECmanufacturer==0xFF:\n self.JEDECtype=0x20;\n self.JEDECcapacity=0x14;\n jedec=0x202014;\n else:\n self.JEDECtype=ord(data[2]);\n self.JEDECcapacity=ord(data[3]);\n jedec=(ord(data[1])<<16)+(ord(data[2])<<8)+ord(data[3]);\n self.JEDECsize=self.JEDECsizes.get(self.JEDECcapacity);\n if self.JEDECsize==None:\n self.JEDECsize=0;\n \n if jedec==0x1F4501:\n self.JEDECsize=1024**2;\n self.JEDECdevice=jedec;\n return data;", "def set_dft(self, value):\n self.dft = value", "def setup_UT_te(self):\n self.setup_O()\n self.setup_T()\n # diagonalizing T\n ET, LT, RT = eig(self.T, b=self.O, left=True, right=True)\n LT = LT.transpose().conjugate()\n exp_T = np.exp(-1j*ET / self.hbar)\n # order according to absolute value:\n i_sort = np.argsort(-abs(exp_T))\n exp_T = exp_T[i_sort]\n RT = RT[:,i_sort]\n LT = LT[i_sort,:]\n # normalize RL to O and test the decomposition\n RT, LT = self.normalize_RL_to_O(RT, LT)\n # test the quality of the decomposition -------------------------\n # we exclude directions of evals below 10**(-15) by hand\n max_mode = len(np.where(abs(exp_T)>10**(-15))[0])\n ET_red = ET[:max_mode]\n RT_red = RT[:,:max_mode]\n LT_red = LT[:max_mode,:]\n # 1) test of orthogonality on the reduced space\n unity = np.dot(LT_red, np.dot(self.O, RT_red))\n ortho_error = abs(unity - np.diag(np.ones(max_mode))).max()\n print(\"Orthogonality errors\", ortho_error)\n # 1) test difference between the full and the 
reduced te-operator\n UT_red = np.dot(RT_red, np.dot(np.diag(exp_T[:max_mode]),\n np.dot(LT_red, self.O)))\n UT = np.dot(RT, np.dot(np.diag(exp_T), np.dot(LT, self.O)))\n print(\"Propagator error\", abs(UT_red - UT).max())\n self.UT = UT", "def testRead(self):\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_TSK, location='/passwords.txt',\n inode=self._IDENTIFIER_PASSWORDS_TXT, parent=self._bde_path_spec)\n file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)\n\n self._TestRead(file_object)", "def testRead(self):\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_TSK, location='/passwords.txt',\n inode=self._IDENTIFIER_PASSWORDS_TXT, parent=self._bde_path_spec)\n file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)\n\n self._TestRead(file_object)", "def _phi_ode(self, t, z):\n # prep states and phi_matrix\n state = z[0:len(self.istate)]\n ad_state = make_ad(state)\n phi_flat = clear_ad(z[len(self.istate):])\n phi = np.reshape(phi_flat, (len(self.istate),\n len(self.istate)))\n\n # find the accelerations and jacobian\n state_deriv = self.force_model.ode(0, ad_state)\n a_matrix = jacobian(self.force_model.ode(0, ad_state),\n ad_state)\n\n # compute the derivative of the STM and repackage\n phid = np.matmul(a_matrix, phi)\n phid_flat = phid.flatten()\n z_out = np.concatenate((state_deriv, phid_flat))\n\n return z_out", "def test_tte2(self):\n filename = str(self.temp_j2k_filename)\n xtx2_setup(filename)\n self.assertTrue(True)", "def test_encrypt_decrypt(self):\n with open(self.file_path, \"rt\") as file:\n start_file = file.read()\n nonce1 = globals.generate_random_nonce()\n nonce2 = globals.generate_random_nonce()\n encrypted_file_path, additional_data = self.file_crypt.encrypt_file(\n self.file_path,\n nonce1,\n nonce2)\n file_decrypted = self.file_crypt.decrypt_file(\n file_path=encrypted_file_path,\n additional_data=additional_data)\n with open(file_decrypted, \"rt\") as file:\n end_file = file.read()\n self.assertEqual(start_file, end_file, \"Files differ!\")", "def __output_encrypted(self, data, key_len, filename, iv):\n with open(filename, \"w\") as f:\n f.write(START_HEADER + \"\\n\")\n\n key = \"Description\"\n val = \"Crypted file\"\n f.write(self.gen_key_val(key, val))\n\n key = \"Method\"\n val = \"AES\"\n f.write(self.gen_key_val(key, val))\n\n key = \"File name\"\n val = filename\n f.write(self.gen_key_val(key, val))\n\n key = \"IV\"\n val = binascii.hexlify(iv)\n f.write(self.gen_key_val(key, val))\n\n key = \"Data\"\n val = base64.b64encode(data)\n # val = data\n f.write(self.gen_key_val(key, val))\n\n f.write(END_HEADER + \"\\n\")", "def create_tvel_file(\n depth: np.array,\n vp: np.array,\n vs: np.array,\n dens: np.array,\n save_folder: str,\n name: str = \"Test\",\n):\n\n assert (\n len(depth) == len(vp) and len(depth) == len(vs) and len(depth) == len(dens)\n ), \"All arrays (depth, vp, vs and dens) should be of same length\"\n\n \"\"\" combining all the data vector \"\"\"\n data = np.vstack((np.vstack((np.vstack((depth, vp)), vs)), dens)).T\n\n with open(join(save_folder, f\"{name}.tvel\"), \"w\") as f:\n f.write(\"# Input file for TauP\\n\")\n f.write(\"NAME TAYAK_BKE\\n\")\n for line in data:\n f.write(f\"{line[0]:8.2f}{line[1]:8.3f}{line[2]:8.3f}{line[3]:8.3f}\\n\")\n f.write(\n \"\"\" 1596.98 4.986 0.000 5.855\n 1853.05 5.150 0.000 6.025\n 2109.13 5.284 0.000 6.166\n 2365.20 5.393 0.000 6.280\n 2621.27 5.475 0.000 6.368\n 2877.35 5.534 0.000 6.430\n 3133.42 5.569 0.000 
6.467\n 3389.50 5.569 0.000 6.467\"\"\"\n )\n f.close()", "def test_create_tpm(self):\n command_line = self._MENU + [self._POOLNAME] + self._DEVICES + [\"--clevis=tpm2\"]\n TEST_RUNNER(command_line)", "def _decrypt(self):\n self._outfile = os.path.join(self.dest, self.plain_file)\n self._infile = self.encrypted_file\n self._log.info(\"Decrypting file '%s' to '%s'\", self.encrypted_file, self._outfile)\n with open(self.encrypted_file, \"rb\") as enc_file:\n openssl(\n \"enc\",\n \"-aes-256-cbc\",\n \"-d\",\n \"-pass\",\n \"file:{secret}\".format(secret=self.secret.keyfile),\n _in=enc_file,\n _out=self._outfile,\n )\n self._log.info(\"File '%s' decrypted to '%s'\", self.encrypted_file, self._outfile)\n return True", "def t_xtide(*varargin):\n nargin = len(varargin)\n if nargin > 0:\n varargin = varargin[0]\n if not os.path.exists('t_xtide.mat'):\n # Read the harmonics file and make a mat file\n filnam = '/usr/share/xtide/harmonics.txt'\n fprintf(\"\\\\n********Can't find mat-file t_xtide.mat ********\\\\n\\\\n\")\n fprintf('Attempting to generate one from an xtide harmonics file....\\\\n\\\\n')\n fprintf('Latest version available from http://bel-marduk.unh.edu/xtide/files.html\\\\n\\\\n')\n # Input name\n fid = - 1\n while fid == - 1:\n\n rep = filnam\n while (lower(rep[0]) != 'y'):\n\n filnam = rep\n rep = 'n'\n rep = input_('Harmonics filename: ' + filnam + '? (y/Y/new file name):', 's')\n if (0 in rep.shape):\n rep = 'y'\n\n fid = open(filnam)\n if fid == - 1:\n fprintf(\"\\\\n****** Can't open filename ->\" + filnam + '<-\\\\n\\\\n')\n\n fprintf('Reading harmonics file (this will take a while)\\\\n')\n xtide, xharm = read_xtidefile(fid) # nargout=2\n fprintf('Saving harmonic information to t_xtide.mat\\\\n')\n savemat('t_xtide', 'xtide', 'xharm')\n else:\n loadmat('t_xtide',matlab_compatible=True)\n if nargin > 0:\n if isstr(varargin[0]):\n # Station name given\n # Identify station - look for exact match first\n ista = strmatch(lower(varargin[0]), lower(xharm.station), 'exact')\n # otherwise go for partial matches\n if (0 in ista.shape):\n # First check to see if a number was selected:\n inum = - 10\n while inum < - 1:\n\n inum = inum + 1\n ll = findstr(lower(varargin[0]), sprintf('(\\n %d)', - inum))\n if not (0 in ll.shape):\n inum = abs(inum)\n varargin[0] = deblank(varargin[0](range(1, (ll - 1 +1))))\n\n ista = strmatch(lower(varargin[0]), lower(xharm.station))\n if max(ista.shape) > 1:\n if inum > 0 & inum <= max(ista.shape):\n ista = ista[(inum -1)]\n else:\n fprintf('Ambiguous Station Choice - Taking first of:\\\\n')\n for kk in range(1, (max(ista.shape) +1)):\n fprintf('\\n %5d: \\n %s\\\\n', ista[(kk -1)], deblank(xharm.station(ista[(kk -1)], :)))\n fprintf(' Long: \\n %.4f Lat: \\n %.4f \\\\n', xharm.longitude(ista[(kk -1)]), xharm.latitude(ista[(kk -1)]))\n fprintf('\\\\n')\n ista = ista[0]\n else:\n if max(ista.shape) == 1 & inum > 1:\n fprintf(\"***Can't find variant (\\n %d) of station - Taking only choice\\\\n\", inum)\n else:\n if max(ista.shape) == 0:\n error('Could not match station')\n varargin[0] = np.array([])\n else:\n # Lat/long?\n dist, hdg = t_gcdist(xharm.latitude, xharm.longitude, varargin[1], varargin[0]) # nargout=2\n mind, ista = np.min(dist) # nargout=2\n if max(ista.shape) > 1:\n fprintf('Ambiguous Station Choice - Taking first of:\\\\n')\n for kk in range(1, (max(ista.shape) +1)):\n fprintf('\\n %5d: \\n %s\\\\n', ista[(kk -1)], deblank(xharm.station(ista[(kk -1)], :)))\n fprintf(' Long: \\n %.4f Lat: \\n %.4f \\\\n', xharm.longitude(ista[(kk -1)]), 
xharm.latitude(ista[(kk -1)]))\n fprintf('\\\\n')\n ista = ista[0]\n else:\n fprintf('\\n %5d: \\n %s\\\\n', ista, deblank(xharm.station(ista, :)))\n fprintf(' Long: \\n %.4f Lat: \\n %.4f \\\\n', xharm.longitude(ista), xharm.latitude(ista))\n varargin[0:2] = np.array([])\n # Time vector (if available) otherwise take current time.\n if max(varargin.shape) > 0 & not isstr(varargin[0]):\n tim = varargin[0]\n tim = tim[:].T\n varargin[0] = np.array([])\n if max(tim.shape) == 1:\n if tim < 1000:\n dat = clock\n tim = datenum(dat[0], dat[1], dat[2]) + np.array([range(0, (tim +1), 1 / 48)]).reshape(1, -1)\n else:\n tim = tim + np.array([range(0, 3, 1 / 48)]).reshape(1, -1)\n # 2 days worth.\n else:\n dat = clock\n tim = datenum(dat[0], dat[1], dat[2]) + np.array([range(0, 49, 0.25)]).reshape(1, -1) / 24\n # Parse properties\n format_ = 'raw'\n unt = 'original'\n k = 1\n while max(varargin.shape) > 0:\n\n if 'for' == lower(varargin[-1](range(1, 4))):\n format_ = lower(varargin[1])\n else:\n if 'uni' == lower(varargin[-1](range(1, 4))):\n unt = lower(varargin[1])\n else:\n error(\"Can't understand property:\" + varargin[0])\n varargin[(np.array([1, 2]).reshape(1, -1) -1)] = np.array([])\n\n # if we want a time series\n pred = np.array([])\n # Convert units if requested.\n units, convf = convert_units(unt, xharm.units(ista, :)) # nargout=2\n if format_[0:2] == 'ra' | format_[0:2] == 'fu' | format_[0:2] == 'ti':\n # Data every minute for hi/lo forecasting.\n if format_[0:2] == 'ti':\n tim = range(tim[0], (tim[-1] +1), (1 / 1440))\n # Convert into time since the beginning of year\n mid = datevec(mean(tim))\n iyr = mid[0] - xtide.startyear + 1\n lt = max(tim.shape)\n xtim = np.dot((tim - datenum(mid[0], 1, 1)), 24)\n # Hours since beginning of year\n #-----------------------------------------------------\n # Sum up everything for the prediction!\n pred = xharm.datum(ista) + np.sum(repmat(xtide.nodefactor(:, iyr) * xharm.A(ista, :).T, 1, lt) * cos(np.dot((np.dot(xtide.speed, xtim) + repmat(xtide.equilibarg(:, iyr) - xharm.kappa(ista, :).T, 1, lt)), (pi / 180))), 1)\n #-----------------------------------------------------\n pred = np.dot(pred, convf)\n # Compute times of hi/lo from every-minute data\n if format_[0:2] == 'ti':\n # Check if this is a current station\n if not (0 in findstr('Current', xharm.station(ista, :)).shape):\n currents = 1\n else:\n currents = 0\n dpred = diff(pred)\n ddpred = diff(dpred > 0)\n flat = np.flatnonzero(ddpred != 0) + 1\n slk = np.flatnonzero(sign(pred[0:pred.shape[0] - 1]) != sign(pred[1:pred.shape[0]]))\n hi.mtime = tim[(flat -1)]\n hi.value = pred[(flat -1)]\n hi.type = np.zeros(shape=(flat.shape, flat.shape), dtype='float64')\n hi.type(np.flatnonzero(ddpred[(flat - 1 -1)] < 0)) = 1\n # 0=lo, 1=hi\n hi.units = deblank(units)\n pred = hi\n # Create information structure\n if format_[0:2] == 'in' | format_[0:2] == 'fu':\n if not (0 in pred.shape):\n pred.yout = pred\n pred.mtime = tim\n else:\n kk = np.flatnonzero(xharm.A(ista, :) != 0)\n pred.freq = xtide.name(kk, :)\n pred.A = np.dot(full(xharm.A(ista, kk).T), convf)\n pred.kappa = full(xharm.kappa(ista, kk).T)\n pred.station = deblank(xharm.station(ista, :))\n pred.longitude = xharm.longitude(ista)\n pred.latitude = xharm.latitude(ista)\n pred.timezone = xharm.timezone(ista)\n pred.units = deblank(units)\n pred.datum = np.dot(xharm.datum(ista), convf)\n # If no output parameters then we plot or display things\n if nargout == 0:\n if 'ti' == format_[(((0:2 -1) -1) -1)]:\n fprintf('High/Low Predictions for \\n 
%s\\\\n', xharm.station(ista, :))\n fprintf('Time offset \\n %.1f from UTC\\\\n\\\\n', xharm.timezone(ista))\n outstr = repmat(' ', max(flat.shape), 41)\n outstr[:, 0:20] = datestr(hi.mtime)\n outstr[:, 21:27] = reshape(sprintf('\\n %6.2f', hi.value), 6, max(flat.shape)).T\n if currents:\n ll = hi.type == 1\n outstr[(ll -1), 30:41] = repmat(' Flood Tide', np.sum(ll), 1)\n ll = hi.type == 0\n outstr[(ll -1), 30:41] = repmat(' Ebb Tide ', np.sum(ll), 1)\n else:\n ll = hi.type == 1\n outstr[(ll -1), 30:41] = repmat(' High Tide ', np.sum(ll), 1)\n ll = hi.type == 0\n outstr[(ll -1), 30:41] = repmat(' Low Tide ', np.sum(ll), 1)\n disp(outstr)\n else:\n if 'ra' == format_[(((0:2 -1) -1) -1)]:\n plot(tim, pred)\n datetick\n title('Tidal prediction for ' + deblank(xharm.station(ista, :)) + ' beginning ' + datestr(tim[0]))\n ylabel(deblank(xharm.units(ista, :)))\n else:\n if 'fu' == format_[(((0:2 -1) -1) -1)]:\n plot(tim, pred.yout)\n datetick\n title('Tidal prediction for ' + deblank(xharm.station(ista, :)) + ' beginning ' + datestr(tim[0]))\n ylabel(deblank(xharm.units(ista, :)))\n else:\n if 'in' == format_[(((0:2 -1) -1) -1)]:\n fprintf('Station: \\n %s\\\\n', pred.station)\n if pred.longitude < 0:\n lon = 'W'\n else:\n lon = 'E'\n if pred.latitude < 0:\n lat = 'S'\n else:\n lat = 'N'\n fprintf(\"Location: \\n %d \\n %.1f' \\n %c, \\n %d \\n %.1f' \\n %c\\\\n\", fix(abs(pred.latitude)), np.dot(rem(abs(pred.latitude), 1), 60), lat, fix(abs(pred.longitude)), np.dot(rem(abs(pred.longitude), 1), 60), lon)\n fprintf('Time offset \\n %.1f from UTC\\\\n\\\\n', pred.timezone)\n clear('pred')\n #\n return pred", "def decryptor(infile: str, outfile: str, password: str, mode: str) -> int:\n\n dec = Decrypt(infile)\n\n if mode.upper() == 'AES':\n decrypted_data = dec.AES(password)\n elif mode.upper() == 'DES':\n decrypted_data = dec.DES(password)\n elif mode.upper() == 'SALSA20':\n decrypted_data = dec.Salsa20(password)\n else:\n return 2\n\n if not decrypted_data:\n cleanup(outfile)\n return 3\n\n if not outfile.endswith(dec.extension):\n outfile += dec.extension\n write_data(decrypted_data, outfile)\n return 0", "def decryptor(file_name, key):\n\twith open(file_name, 'rb') as dfile:\n\t\tciphertext = dfile.read()\n\t\tdec = decrypt(key, ciphertext)\n\t\tdfile.close()\n\t\tdtext = \"The encrypted file was opened by macupdate.py by the user: \"\n\t\tcreateLog(dtext, 'logs/macupdate.log')\n\t\treturn dec", "def _dK_ode_dtheta(self, target):\r\n t_ode = self._t[self._index>0]\r\n dL_dK_ode = self._dL_dK[self._index>0, :]\r\n index_ode = self._index[self._index>0]-1\r\n if self._t2 is None:\r\n if t_ode.size==0:\r\n return \r\n t2_ode = t_ode\r\n dL_dK_ode = dL_dK_ode[:, self._index>0]\r\n index2_ode = index_ode\r\n else:\r\n t2_ode = self._t2[self._index2>0]\r\n dL_dK_ode = dL_dK_ode[:, self._index2>0]\r\n if t_ode.size==0 or t2_ode.size==0:\r\n return\r\n index2_ode = self._index2[self._index2>0]-1\r\n\r\n h1 = self._compute_H(t_ode, index_ode, t2_ode, index2_ode, stationary=self.is_stationary, update_derivatives=True)\r\n #self._dK_ddelay = self._dh_ddelay\r\n self._dK_dsigma = self._dh_dsigma\r\n\r\n if self._t2 is None:\r\n h2 = h1\r\n else:\r\n h2 = self._compute_H(t2_ode, index2_ode, t_ode, index_ode, stationary=self.is_stationary, update_derivatives=True)\r\n\r\n #self._dK_ddelay += self._dh_ddelay.T\r\n self._dK_dsigma += self._dh_dsigma.T\r\n # C1 = self.sensitivity\r\n # C2 = self.sensitivity\r\n\r\n # K = 0.5 * (h1 + h2.T)\r\n # var2 = C1*C2\r\n # if self.is_normalized:\r\n # dk_dD1 = 
(sum(sum(dL_dK.*dh1_dD1)) + sum(sum(dL_dK.*dh2_dD1.T)))*0.5*var2\r\n # dk_dD2 = (sum(sum(dL_dK.*dh1_dD2)) + sum(sum(dL_dK.*dh2_dD2.T)))*0.5*var2\r\n # dk_dsigma = 0.5 * var2 * sum(sum(dL_dK.*dK_dsigma))\r\n # dk_dC1 = C2 * sum(sum(dL_dK.*K))\r\n # dk_dC2 = C1 * sum(sum(dL_dK.*K))\r\n # else:\r\n # K = np.sqrt(np.pi) * K\r\n # dk_dD1 = (sum(sum(dL_dK.*dh1_dD1)) + * sum(sum(dL_dK.*K))\r\n # dk_dC2 = self.sigma * C1 * sum(sum(dL_dK.*K))\r\n\r\n\r\n # dk_dSim1Variance = dk_dC1\r\n # Last element is the length scale.\r\n (dL_dK_ode[:, :, None]*self._dh_ddelay[:, None, :]).sum(2)\r\n\r\n target[-1] += (dL_dK_ode*self._dK_dsigma/np.sqrt(2)).sum()\r\n\r\n\r\n # # only pass the gradient with respect to the inverse width to one\r\n # # of the gradient vectors ... otherwise it is counted twice.\r\n # g1 = real([dk_dD1 dk_dinvWidth dk_dSim1Variance])\r\n # g2 = real([dk_dD2 0 dk_dSim2Variance])\r\n # return g1, g2\"\"\"\r", "def digital_temp_data(self): # This function will give the initial digital format for temperature data \n self._bus.write_byte(self._addr, 0x58) \n time.sleep(0.05) \n tempadcbytes = self._bus.read_i2c_block_data(self._addr, 0x00) \n time.sleep(0.05) \n self.tempadc=tempadcbytes[0]*65536.0+tempadcbytes[1]*256.0+tempadcbytes[2]", "def example():\r\n path = os.path.abspath(os.path.dirname(__name__))\r\n module = CryptoModule()\r\n # create_name this is open source py module with confidential information\r\n opened_path = os.path.join(path, 'secret.py')\r\n # read_name this is open encrypted py module with confidential information\r\n secured_path = os.path.join(path, 'secured.py')\r\n # encrypt, read secret.py and create secured.py\r\n module.create_secured_module(path_to_opened_module=opened_path, path_to_secured_module=secured_path,\r\n create_key=True, delete_source_opened_module=False)\r\n # decrypt, read secured.py and create opened.py\r\n module.create_opened_module(path_to_secured_module=secured_path, path_to_opened_module=opened_path)\r\n print('ok')", "def test_tte1(self):\n filename = str(self.temp_j2k_filename)\n self.xtx1_setup(filename)", "def Elevate(self):\n self.Send(self.EncryptString('elevate\\n'))\n print self.DecryptString(self.Recv(4096))\n\n self.Send(self.EncryptString(self.flag_2))\n print self.DecryptString(self.Recv(4096))\n\n self.Send(self.EncryptString('RocketDonkey\\n'))\n print self.DecryptString(self.Recv(4096))", "def detx(self, det_id, t0set=None, calibration=None):\n url = 'detx/{0}?'.format(det_id) # '?' 
since it's ignored if no args\n if t0set is not None:\n url += '&t0set=' + t0set\n if calibration is not None:\n url += '&calibrid=' + calibration\n\n detx = self._get_content(url)\n return detx", "def unfreeze_rotation(self):\n self.android_device_driver.adb.exec_adb_cmd(\n \"shell content insert --uri content://settings/system --bind name:s:accelerometer_rotation --bind value:i:1\"\n ).wait()", "def main():\n \n # Ask for their option.\n \n inputFile = \"\"\n outputFile = \"\"\n \n choice = askOption()\n key = askForKey()\n \n inputFile = askInputFile()\n inputText = readText(inputFile)\n \n outputFile = askOutputFile()\n \n #Start the timer here.\n startTimer = time.time()\n \n # Depending on their choice, encode or decode.\n if choice == 'e':\n encryptedText = RouteCipher.encrypt(inputText, key)\n writeText(encryptedText, outputFile)\n elif choice == 'd':\n decryptedText = RouteCipher.decrypt(inputText, key)\n writeText(decryptedText, outputFile)\n \n finishTimer = time.time()\n totalTime = round(finishTimer - startTimer, 2)\n \n print(\"The operation was succesful\")\n print(f\"Total time needed: {totalTime}\")", "def StoreAntirollback(now, ar_filename, kern_f):\n print 'antirollback time now ' + str(now)\n sys.stdout.flush()\n kern_f.write(str(now))\n kern_f.flush()\n tmpdir = os.path.dirname(ar_filename)\n with tempfile.NamedTemporaryFile(mode='w', dir=tmpdir, delete=False) as f:\n f.write(str(now) + '\\n')\n f.flush()\n os.fsync(f.fileno())\n os.rename(f.name, ar_filename)", "def test_dfunction_saveable(self):\n \n wa = FrequencyAxis(0.0, 1000, 0.1)\n \n fw = numpy.exp(-wa.data)\n \n fce = DFunction(wa,fw)\n \n #fce.plot()\n\n #with h5py.File(\"test_file_1\",driver=\"core\", \n # backing_store=False) as f:\n with tempfile.TemporaryFile() as f:\n \n fce.save(f, test=True)\n \n fce2 = DFunction()\n fce2 = fce2.load(f, test=True)\n \n #fce2.plot()\n \n numpy.testing.assert_array_equal(fce.data, fce2.data)", "def teleopInit(self):\n # self.drive.setSafetyEnabled(True)\n self.compressor.start()\n pass", "def vmdexec(cmds):\n handle,filename=mkstemp(dir='/tmp')\n open(filename,'w').write(cmds)\n os.system('vmd -dispdev text -e %s'%filename) # run vmd in the terminal\n os.system('/bin/rm %s'%filename) # clean-up", "def construct_TDI(self, t, Orbit):\n\t\n\tself.make_padded_delta_l(t)\n\n\tp12 = td.Phase(1,2, t, self.delta_l_padded[0,1,:])\n\tp21 = td.Phase(2,1, t, self.delta_l_padded[1,0,:])\n\n\tp13 = td.Phase(1,3, t, self.delta_l_padded[0,2,:])\n\tp31 = td.Phase(3,1, t, self.delta_l_padded[2,0,:])\n\n\tp23 = td.Phase(2,3, t, self.delta_l_padded[1,2,:])\n\tp32 = td.Phase(3,2, t, self.delta_l_padded[2,1,:])\n \n\tp12.FT_phase(Orbit)\n\tp21.FT_phase(Orbit)\n\tp13.FT_phase(Orbit)\n\tp31.FT_phase(Orbit)\n\tp23.FT_phase(Orbit)\n\tp32.FT_phase(Orbit)\n\n\ttdi_GW = td.TDI(p12, p21, p13, p31, p23, p32, Orbit)\n\t\n\treturn tdi_GW", "async def describe_dbinstance_tdeinfo_with_options_async(\n self,\n request: dds_20151201_models.DescribeDBInstanceTDEInfoRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceTDEInfoResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = 
request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceTDEInfo',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstanceTDEInfoResponse(),\n await self.call_api_async(params, req, runtime)\n )", "def dftb_dftd3(third_ord, damp_flag, damp_exp):\n dftb_dftd3=\"\"\"\n ThirdOrderFull = {{ third_ord }}\n DampXH = {{ damp_flag }}\n DampXHExponent = {{ damp_exp }}\n Dispersion = DftD3{}\n}\n \"\"\"\n return Environment().from_string(dftb_dftd3).render(third_ord=third_ord, damp_flag=damp_flag, damp_exp=damp_exp)", "def test_encrypt_decrypt(self):\n reference = get_random_test_tensor()\n encrypted_tensor = SharedTensor(reference)\n self._check(encrypted_tensor, reference, 'en/decryption failed')", "def test_default_run_ubuntu_keep_vdmx():\n test_dir = os.path.join(\"tests\", \"test_files\", \"fonts\", \"temp\")\n notouch_inpath = os.path.join(\"tests\", \"test_files\", \"fonts\", \"Ubuntu-Regular.ttf\")\n test_inpath = os.path.join(\n \"tests\", \"test_files\", \"fonts\", \"temp\", \"Ubuntu-Regular.ttf\"\n )\n test_outpath = os.path.join(\n \"tests\", \"test_files\", \"fonts\", \"temp\", \"Ubuntu-Regular-dehinted.ttf\"\n )\n test_args = [test_inpath, \"--keep-vdmx\"]\n\n # setup\n if os.path.isdir(test_dir):\n shutil.rmtree(test_dir)\n os.mkdir(test_dir)\n shutil.copyfile(notouch_inpath, test_inpath)\n\n # execute\n run(test_args)\n\n # test\n tt = TTFont(test_outpath)\n assert \"VDMX\" in tt\n\n # tear down\n shutil.rmtree(test_dir)", "def direct_mode_test(self,earfcn,bwMhz,powerdBm,ud_config,sf_sweep=False,with_rx=False):\r\r\n\r\r\n self.meas_list = ['FREQ_ERR','IQ_OFFSET', 'EVM']\r\r\n tol_dB = 1\r\r\n\r\r\n bursted = self.setup_tdd(earfcn,bwMhz,powerdBm,ud_config,with_rx=with_rx)\r\r\n\r\r\n self.GetTesterMeasurments(exp_powerdBm=powerdBm,\r\r\n tol_dB=tol_dB)\r\r\n\r\r\n # Note - Direct AGC value leads to different powers on different platforms\r\r\n # -- use driver mode and read back AGC value to get baseline,\r\r\n # then try that value in direct mode.\r\r\n dac_value = self.modemObj.query_txagc()\r\r\n\r\r\n # Set minimum power\r\r\n self.modemObj.set_txagc_dbm(value=-70)\r\r\n self.GetTesterMeasurments(exp_powerdBm=-9999,\r\r\n tol_dB=0,\r\r\n exp_no_signal=True)\r\r\n\r\r\n # Set the original power, but as a direct gain DAC word this time.\r\r\n self.modemObj.set_txagc_direct(value=dac_value)\r\r\n\r\r\n sf_sweep = bursted and sf_sweep\r\r\n meas_sf_list = range(10) if sf_sweep else [2] # 2 is always UL\r\r\n for meas_sf in meas_sf_list:\r\r\n self.set_meas_sf(meas_sf)\r\r\n self.instr.lte_tx.conf_measurement_subframe(measSubframe=meas_sf)\r\r\n\r\r\n if sf_is_uplink(ud_config, meas_sf):\r\r\n self.GetTesterMeasurments(exp_powerdBm=powerdBm,\r\r\n tol_dB=tol_dB)\r\r\n else:\r\r\n # Non-UL subframe, do not expect signal\r\r\n self.GetTesterMeasurments(exp_powerdBm=-9999,\r\r\n tol_dB=0,\r\r\n exp_no_signal=True)\r\r\n\r\r\n meas_sf = 2\r\r\n self.set_meas_sf(meas_sf)\r\r\n self.instr.lte_tx.conf_measurement_subframe(measSubframe=meas_sf)\r\r\n\r\r\n # Check going back to driver 
mode\r\r\n self.modemObj.set_txagc_dbm(value=-70)\r\r\n self.GetTesterMeasurments(exp_powerdBm=-9999,\r\r\n tol_dB=0,\r\r\n exp_no_signal=True)\r\r\n self.modemObj.set_txagc_dbm(value=powerdBm)\r\r\n self.GetTesterMeasurments(exp_powerdBm=powerdBm,\r\r\n tol_dB=tol_dB)", "def test_tdwr():\n f = Level3File(get_test_data('nids/Level3_SLC_TV0_20160516_2359.nids'))\n assert f.prod_desc.prod_code == 182", "def main():\n print(\"Reading from config.json\")\n download_decrypt_store = DownloadDecryptStore()\n print(\"Downloading key from storage-bucket\")\n file_path = download_decrypt_store.download_key_from_blob()\n print(\"Decrypting downloaded file\")\n download_decrypt_store.decrypt_from_file(file_path)\n print(\"Completed\")", "def de_cryptate(self):\r\n\r\n intab1 = \"abcdefghijklomnopqrstuvwxyz !,.\"\r\n outtab1 = \"?2p=o)7i(u9/y&t3%r¤5e#w1q!>*'^;)\"\r\n# Fetching from written in textbox\r\n s = self.textbox.toPlainText()\r\n a = s.lower()\r\n# Changing out the letters/numbers/etc\r\n crypted = (a.translate({ord(x): y for (y, x) in zip(intab1, outtab1)}))\r\n# Clear the textbox\r\n self.textbox.clear()\r\n# Write the Decrypted text\r\n self.textbox.setPlainText(crypted)", "def describe_dbinstance_tdeinfo_with_options(\n self,\n request: dds_20151201_models.DescribeDBInstanceTDEInfoRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeDBInstanceTDEInfoResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeDBInstanceTDEInfo',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeDBInstanceTDEInfoResponse(),\n self.call_api(params, req, runtime)\n )", "def test_tte5(self):\n filename = str(self.temp_j2k_filename)\n xtx5_setup(filename)\n self.assertTrue(True)", "def configure_disable_aes_encryption(device):\n dialog = Dialog(\n [\n Statement(\n pattern=r\".*Continue\\s*with\\s*master\\s*key\\s*deletion.*\",\n action=\"sendline(yes)\",\n loop_continue=True,\n continue_timer=False,\n )\n ]\n )\n try:\n device.configure(\"no key config-key password-encrypt\", reply=dialog)\n device.configure(\"no password encryption aes\")\n except SubCommandFailure as e:\n raise SubCommandFailure(\n \"Could not remove aes password encryption on {device}.\\nError: {e}\".format(\n device=device.name, e=str(e))\n )", "def do_android_decryption(self):\r\n self.aes_decryption_key = self.extract_aes_key()\r\n self.decrypt_device_file()\r\n # join is optimized and does not cause O(n^2) total memory copies.\r\n self.decrypted_file = b\"\\n\".join(self.good_lines)", "def encrypt(self,timerPrinting=False):\n\n\t\tt = time.time() \n\t\tif self.extension not in 
self.path:\n\t\t\twith open(self.path,'rb') as infile:\n\t\t\t\tfile_data = infile.read()\n\t\t\t#Start To CHecking The PlatForm\n\t\t\t# if platform.system() == \"Windows\":\n\t\t\t# \tself.path_dir = self.path.split(\"\\\\\")[-1]\n\t\t\t# elif platform.system() == \"Linux\":\n\t\t\t# \tself.path_dir = self.path.split('/')[-1]\n\t\t\t# #End Checking Wich Platform\n\t\t\t# print('Encryption of '+self.path_dir+'...')\n\t\t\t# print('It\\'s may take a will')\n\t\t\t################################### Blowfish Algorithm ##############################\n\t\t\tbs = Blowfish.block_size\n\t\t\tiv = Random.new().read(bs)\n\t\t\tpadding = b\"}\"\n\t\t\tp = lambda s: s+(bs - len(s) % bs )*padding\n\t\t\tc= Blowfish.new(self.key, Blowfish.MODE_CBC, iv)\n\t\t\tencrypt = iv + c.encrypt(p(file_data))\n\t\t\tself.encrypt = base64.b64encode(encrypt) \n\t\t\t################################################################\n\t\t\t#print(\"writing in your file ...\")\n\t\t\tos.remove(self.path)\n\t\t\twith open(self.path + self.extension,\"wb\") as newfile:\n\t\t\t\tnewfile.write(self.encrypt)\n\t\t\tif timerPrinting:\n\t\t\t\tprint('Done In '+ time.time() -t)\n\t\telse:\n\t\t\tprint('The File is already encrypt.')", "def test_encryption_cycle_default_algorithm_non_framed_no_encryption_context(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"], key_provider=self.kms_master_key_provider, frame_length=0\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]", "def encryptor(file_name, key, plaintext):\n\twith open(file_name, 'w') as efile:\n\t\tenc = encrypt(key, plaintext)\n\t\tefile.write(enc)\n\t\tefile.close()\n\t\tetext = \"An encrypted passfile was created named key.enc for further use in this script by the user: \"\n\t\tcreateLog(etext, 'logs/macupdate.log')", "def disable_tee(self):\n self._tee = False", "def test_storage_truncation(tmp_path):\n file = tmp_path / \"test_storage_truncation.hdf5\"\n for truncate in [True, False]:\n storages = [MemoryStorage()]\n if module_available(\"h5py\"):\n storages.append(FileStorage(file))\n tracker_list = [s.tracker(interval=0.01) for s in storages]\n\n grid = UnitGrid([8, 8])\n state = ScalarField.random_uniform(grid, 0.2, 0.3)\n eq = DiffusionPDE()\n\n eq.solve(state, t_range=0.1, dt=0.001, tracker=tracker_list)\n if truncate:\n for storage in storages:\n storage.clear()\n eq.solve(state, t_range=[0.1, 0.2], dt=0.001, tracker=tracker_list)\n\n times = np.arange(0.1, 0.201, 0.01)\n if not truncate:\n times = np.r_[np.arange(0, 0.101, 0.01), times]\n for storage in storages:\n msg = f\"truncate={truncate}, storage={storage}\"\n np.testing.assert_allclose(storage.times, times, err_msg=msg)\n\n if any(platform.win32_ver()):\n for storage in storages:\n if isinstance(storage, FileStorage):\n storage.close()\n\n assert not storage.has_collection", "def test_decryption(d, c):\n\n#\td = int(raw_input(\"\\nEnter d from public key\\n\"))\n#\tc = int(raw_input(\"\\nEnter c from public key\\n\"))\n\n x = int(raw_input(\"\\nEnter number to decrypt\\n\"))\n decode(endecrypt(x, d, c))", "def change_TTS_engine(self):\n\t\t\n\t\tif self.isActiveDualTTS:\n\t\t\t#dual TTS\n\t\t\tpath_to_file = '/var/lib/snips/skills/snips_app_pilldispenser/settings/dual_TTS.sh'\n\t\t\tsubprocess.call([path_to_file])\n\t\t\tprint('Dual TTS is enabled. 
Using Amazon Polly TTS in case of internet connection, else use offline Picotts TTS.')\n\t\t\t\n\t\telse:\n\t\t\t#go back to single offline Picotts TTS\n\t\t\tpath_to_file = '/var/lib/snips/skills/snips_app_pilldispenser/settings/single_TTS.sh'\n\t\t\tsubprocess.call([path_to_file])\n\t\t\tprint('Dual TTS is disabled. Using offline Picotts TTS regardless of internect connection.')", "def test_secretbox_enc_dec(self):\n # Encrypt with sk\n encrypted_data = nacl.secretbox_encrypt(data=self.unencrypted_data, sk=self.sk)\n\n # Decrypt with sk\n decrypted_data = nacl.secretbox_decrypt(data=encrypted_data, sk=self.sk)\n\n self.assertEqual(self.unencrypted_data, decrypted_data)", "def test_ttd0(self):\n filename = str(self.temp_j2k_filename)\n ttx0_setup(filename)\n\n kwargs = {'x0': 0,\n 'y0': 0,\n 'x1': 1000,\n 'y1': 1000,\n 'filename': filename,\n 'codec_format': openjp2.CODEC_J2K}\n tile_decoder(**kwargs)\n self.assertTrue(True)", "def test_ttd1(self):\n filename = str(self.temp_j2k_filename)\n\n # Produce the tte0 output file for ttd0 input.\n self.xtx1_setup(filename)\n\n kwargs = {'x0': 0,\n 'y0': 0,\n 'x1': 128,\n 'y1': 128,\n 'filename': filename,\n 'codec_format': openjp2.CODEC_J2K}\n tile_decoder(**kwargs)\n self.assertTrue(True)", "def freeze_rotation(self):\n self.android_device_driver.adb.exec_adb_cmd(\n \"shell content insert --uri content://settings/system --bind name:s:accelerometer_rotation --bind value:i:0\"\n ).wait()", "def setupForFTK(self):\n t1 = self.getKeyword('ISS CONF T1NAME').strip()\n t2 = self.getKeyword('ISS CONF T2NAME').strip()\n #swapped = self.getKeyword('ISS PRI STS'+t1[2]+' GUIDE_MODE').strip()\n\n fsub_pos_fri = self.maxSnrInScan(fsu='FSUB', opdc='OPDC', plot=1)\n fsua_pos_fri = self.maxSnrInScan(fsu='FSUA', opdc='OPDC', plot=2)\n print '---{'+self.insmode+'}---'\n if swapped == 'NORMAL':\n print ' OPDC -> [ZPD offset] x [sign',self.DLtrack,\\\n '] =',-fsub_pos_fri\n print 'DOPDC -> [ZPD offset] x [sign',\\\n self.getKeyword('ISS DDL1 NAME').strip(),\\\n '] = ',(fsub_pos_fri-fsua_pos_fri)\n else:\n print ' OPDC -> [ZPD offset] x [sign',self.DLtrack,\\\n '] =', fsua_pos_fri\n print 'DOPDC -> [ZPD offset] x [sign',\\\n self.getKeyword('ISS DDL2 NAME').strip(),\\\n '] = ',(fsua_pos_fri-fsub_pos_fri)\n return", "def setup_tdelta(self, dir1: str, num1: int, pos1: str, dir2: str, num2: int, pos2: str) -> None:\n cmd = ':measure:define deltatime,{0},{1},{2},{3},{4},{5}'.format(dir1, num1, pos1, dir2, num2, pos2)\n self.write(cmd)", "def decrypt(self, payload):\r\n\r\n #print(b'payload: %s'%(payload))\r\n decrypt1 = aes(self.ivkey, 2, self.staticiv)\r\n iv = decrypt1.decrypt(b'%s'%(payload['eiv']))\r\n #print(b'iv : %s'%(iv))\r\n decrypt2 = aes(b'%s'%(self.datakey), 2, b'%s'%(iv))\r\n temp = decrypt2.decrypt(b'%s'%(payload['ed']))\r\n #print(b'data : %s'%(temp))\r\n x_accel = int.from_bytes(temp[:4],\"big\")\r\n y_accel = int.from_bytes(temp[4:8],\"big\")\r\n z_accel = int.from_bytes(temp[8:12],\"big\")\r\n temp = float(temp[12:])\r\n print(x_accel,y_accel,z_accel,temp)\r\n temp1 = dict()\r\n \r\n temp1[\"value1\"] = str(x_accel)\r\n temp1[\"value2\"] = str(y_accel)\r\n temp1[\"value3\"] = str(z_accel)\r\n urequests.request(\"POST\", \"http://maker.ifttt.com/trigger/Spinner2/with/key/c6aKBXblAZ9tkL3Vu9tIlr\", json=temp1, headers={\"Content-Type\": \"application/json\"})\r\n temp1[\"value1\"] = str(temp)\r\n temp1[\"value2\"] = str(self.nodeid)\r\n temp1[\"value3\"] = str(self.sessionID)\r\n urequests.request(\"POST\", 
\"http://maker.ifttt.com/trigger/Spinner2/with/key/c6aKBXblAZ9tkL3Vu9tIlr\", json=temp1, headers={\"Content-Type\": \"application/json\"})\r\n temp1[\"value1\"] = ''\r\n temp1[\"value2\"] = ''\r\n temp1[\"value3\"] = ''\r\n urequests.request(\"POST\", \"http://maker.ifttt.com/trigger/Spinner2/with/key/c6aKBXblAZ9tkL3Vu9tIlr\", json=temp1, headers={\"Content-Type\": \"application/json\"})\r\n \r\n if self.x_accel == None or self.y_accel == None or self.z_accel == None:\r\n self.x_accel = x_accel\r\n self.y_accel = y_accel\r\n self.z_accel = z_accel\r\n \r\n elif abs(self.x_accel - x_accel) > 30 or abs(self.y_accel - y_accel) > 30 or abs(self.z_accel - z_accel) > 30:\r\n self.R_LED.value(1)\r\n self.x_accel = x_accel\r\n self.y_accel = y_accel\r\n self.z_accel = z_accel\r\n \r\n else:\r\n self.R_LED.value(0)\r\n self.x_accel = x_accel\r\n self.y_accel = y_accel\r\n self.z_accel = z_accel\r\n \r\n if self.temp == None:\r\n self.temp = temp\r\n \r\n elif abs(self.temp - temp) < 1:\r\n self.G_LED.freq(10)\r\n elif abs(self.temp - temp) >= 1:\r\n if 10 + (5 * int(temp - self.temp)) < 0:\r\n self.G_LED.freq(0)\r\n elif temp - self.temp <= -1:\r\n self.G_LED.freq(10 + (5 * int(temp - self.temp)))\r\n else:\r\n self.G_LED.freq(10 + (5 * int(temp - self.temp)))\r\n \r\n return \"Successful Decryption\"", "def fetch_taiwan_ntu_dsi():\n dipy_home = pjoin(os.path.expanduser('~'), '.dipy')\n uraw = 'http://dl.dropbox.com/u/2481924/taiwan_ntu_dsi.nii.gz'\n ubval = 'http://dl.dropbox.com/u/2481924/tawian_ntu_dsi.bval'\n ubvec = 'http://dl.dropbox.com/u/2481924/taiwan_ntu_dsi.bvec'\n ureadme = 'http://dl.dropbox.com/u/2481924/license_taiwan_ntu_dsi.txt'\n folder = pjoin(dipy_home, 'taiwan_ntu_dsi')\n\n md5_list = ['950408c0980a7154cb188666a885a91f', # data\n '602e5cb5fad2e7163e8025011d8a6755', # bval\n 'a95eb1be44748c20214dc7aa654f9e6b', # bvec\n '7fa1d5e272533e832cc7453eeba23f44'] # license\n\n url_list = [uraw, ubval, ubvec, ureadme]\n fname_list = ['DSI203.nii.gz', 'DSI203.bval', 'DSI203.bvec', 'DSI203_license.txt']\n\n if not os.path.exists(folder):\n print('Creating new directory %s' % folder)\n os.makedirs(folder)\n print('Downloading raw DSI data (91MB)...')\n\n for i in range(len(md5_list)):\n _get_file_data(pjoin(folder, fname_list[i]), url_list[i])\n check_md5(pjoin(folder, fname_list[i]), md5_list[i])\n\n print('Done.')\n print('Files copied in folder %s' % folder)\n print('See DSI203_license.txt for LICENSE.')\n print('For the complete datasets please visit :')\n print('http://dsi-studio.labsolver.org')\n\n else:\n print('Dataset is already in place. If you want to fetch it again, please first remove the folder %s ' % folder)", "def DirDE():\n\n global Asm\n\n if dec.Asm.Parse_Pointer == 0:\n # No parameter given\n errors.DoError('missoper', False)\n dec.Asm.New_Label = ''\n return\n\n register = -1\n reg = assem.GetWord().upper()\n if (len(reg) == 2 or len(reg) == 3) and reg[0] == 'R':\n # Can it be a register name? 
Must be 2 or 3 chars long and start with R\n reg = reg[1:]\n if reg.isdigit:\n # The register number must be numeric of course\n if len(reg) == 1 or reg[0] != '0':\n # It is numeric, without a leading 0\n register = int(reg)\n if register < 0 or register > 31:\n # It is not a legal register\n errors.DoError('badoper', False)\n dec.Asm.New_Label = ''\n else:\n # It is a legal register, set it's value\n dec.Asm.BOL_Address = register\n dec.Asm.List_Address = register\n dec.Asm.Mnemonic = '.SE' # Handle rest like .SE\n\n # Ignore more parameters this time (like .EQ).", "def kv_esx_init():\n disk_lib_init()", "def test_once(config, qemu_img=False):\n\n iotests.log(\"# ================= %s %s =================\" % (\n \"qemu-img\" if qemu_img else \"dm-crypt\", config))\n\n oneKB = 1024\n oneMB = oneKB * 1024\n oneGB = oneMB * 1024\n oneTB = oneGB * 1024\n\n # 4 TB, so that we pass the 32-bit sector number boundary.\n # Important for testing correctness of some IV generators\n # The files are sparse, so not actually using this much space\n image_size = 4 * oneTB\n if qemu_img:\n iotests.log(\"# Create image\")\n qemu_img_create(config, image_size / oneMB)\n else:\n iotests.log(\"# Create image\")\n create_image(config, image_size / oneMB)\n\n lowOffsetMB = 100\n highOffsetMB = 3 * oneTB / oneMB\n\n try:\n if not qemu_img:\n iotests.log(\"# Format image\")\n cryptsetup_format(config)\n\n for slot in config.active_slots()[1:]:\n iotests.log(\"# Add password slot %s\" % slot)\n cryptsetup_add_password(config, slot)\n\n # First we'll open the image using cryptsetup and write a\n # known pattern of data that we'll then verify with QEMU\n\n iotests.log(\"# Open dev\")\n cryptsetup_open(config)\n\n try:\n iotests.log(\"# Write test pattern 0xa7\")\n qemu_io_write_pattern(config, 0xa7, lowOffsetMB, 10, dev=True)\n iotests.log(\"# Write test pattern 0x13\")\n qemu_io_write_pattern(config, 0x13, highOffsetMB, 10, dev=True)\n finally:\n iotests.log(\"# Close dev\")\n cryptsetup_close(config)\n\n # Ok, now we're using QEMU to verify the pattern just\n # written via dm-crypt\n\n iotests.log(\"# Read test pattern 0xa7\")\n qemu_io_read_pattern(config, 0xa7, lowOffsetMB, 10, dev=False)\n iotests.log(\"# Read test pattern 0x13\")\n qemu_io_read_pattern(config, 0x13, highOffsetMB, 10, dev=False)\n\n\n # Write a new pattern to the image, which we'll later\n # verify with dm-crypt\n iotests.log(\"# Write test pattern 0x91\")\n qemu_io_write_pattern(config, 0x91, lowOffsetMB, 10, dev=False)\n iotests.log(\"# Write test pattern 0x5e\")\n qemu_io_write_pattern(config, 0x5e, highOffsetMB, 10, dev=False)\n\n\n # Now we're opening the image with dm-crypt once more\n # and verifying what QEMU wrote, completing the circle\n iotests.log(\"# Open dev\")\n cryptsetup_open(config)\n\n try:\n iotests.log(\"# Read test pattern 0x91\")\n qemu_io_read_pattern(config, 0x91, lowOffsetMB, 10, dev=True)\n iotests.log(\"# Read test pattern 0x5e\")\n qemu_io_read_pattern(config, 0x5e, highOffsetMB, 10, dev=True)\n finally:\n iotests.log(\"# Close dev\")\n cryptsetup_close(config)\n finally:\n iotests.log(\"# Delete image\")\n delete_image(config)\n print", "def operate_cipher(self):", "def run(filename=\"input.json\", path=\".\", **args):\n\n logger = logging.getLogger(__name__)\n \n #read input file (need to add command line specification)\n logger.info(\"Begin processing input file: %s\" % filename)\n eos_dict, thermo_dict, output_file = read_input.extract_calc_data(filename, path, **args)\n eos_dict['jit'] = args['jit']\n\n if 
output_file:\n file_dict = {\"output_file\":output_file}\n else:\n file_dict = {\"output_file\": \"despasito_out.txt\"}\n\n logger.debug(\"EOS dict:\", eos_dict)\n logger.debug(\"Thermo dict:\", thermo_dict)\n logger.info(\"Finish processing input file: {}\".format(filename))\n \n eos = eos_mod(**eos_dict)\n \n # Run either parametrization or thermodynamic calculation\n if \"opt_params\" in list(thermo_dict.keys()):\n logger.info(\"Initializing parametrization procedure\")\n output_dict = fit(eos, thermo_dict)\n #output = fit(eos, thermo_dict)\n logger.info(\"Finished parametrization\")\n write_output.writeout_fit_dict(output_dict,eos,**file_dict)\n else:\n logger.info(\"Initializing thermodynamic calculation\")\n output_dict = thermo(eos, thermo_dict)\n logger.info(\"Finished thermodynamic calculation\")\n write_output.writeout_thermo_dict(output_dict,thermo_dict[\"calculation_type\"],**file_dict)", "def produce_13TeV_template(tag_name=\"HKHI\"):\n num_rebin = 1\n file_name = \"inputs/BkgEstimation_Lin/BkgEstimation_NONE_TOPO_PTDEP_\"+tag_name+\"_Lin.root\"\n print \"Input: \", file_name\n fin = ROOT.TFile.Open(file_name, \"read\")\n h_nom = fin.Get(\"bkg_total_gg_full\").Clone(\"bkg_nominal_old\")\n h_nom.Rebin(num_rebin)\n fout = ROOT.TFile.Open(\"hists_input_\"+tag_name+\".root\", \"recreate\")\n\n h_purity_sys = fin.Get(\"bkg_purity_syst_gg_full\").Clone(\"bkg_purity_syst_gg\")\n h_reducible_sys = fin.Get(\"bkg_reducible_syst_gg_full\").Clone(\"bkg_reducible_syst_gg\")\n h_irreducible_sys = fin.Get(\"bkg_irreducible_syst_gg_full\").Clone(\"bkg_irreducible_syst_gg\")\n h_iso_sys = fin.Get(\"bkg_iso_syst_gg_full\").Clone(\"bkg_iso_syst_gg\")\n\n #file_iso = \"isolation_sys/hist.root\"\n #fin2 = ROOT.TFile.Open(file_iso, \"read\")\n #h_iso_sys = fin2.Get(\"bkg_isolation_syst_gg\")\n ## inflat irreducible uncertainty by factor of 10\n # so that it closes to stats uncertainty in data\n sf = 1\n if INFLATE_SYS:\n sf = 10\n\n # after rebinning systematic uncertainties, need to scale down,\n # otherwise the uncertainties are inflated.\n h_purity_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_irreducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_reducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_iso_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n\n ## truncate the histograms to [200, 2000] GeV\n h_nom_new = truncate_hist(h_nom, \"bkg_nominal\")\n h_purity_sys_new = truncate_hist(h_purity_sys, \"h_purity_sys_new\")\n h_irreducible_sys_new = truncate_hist(h_irreducible_sys, \"h_irreducible_sys_new\")\n h_reducible_sys_new = truncate_hist(h_reducible_sys, \"h_reducible_sys_new\")\n h_iso_sys_new = truncate_hist(h_iso_sys, \"h_iso_sys_new\")\n\n #write down sys and nominal\n fout.cd()\n h_nom_new.Write()\n h_purity_sys_new.Write()\n h_reducible_sys_new.Write()\n h_irreducible_sys_new.Write()\n h_iso_sys_new.Write()\n\n h_purity_up, h_purity_down = create_sys_hist(h_nom_new, h_purity_sys_new, \"purity_sys\")\n h_purity_up.Write()\n h_purity_down.Write()\n\n h_red_up, h_red_down = create_sys_hist(h_nom_new, h_reducible_sys_new, \"reducible_sys\")\n h_red_up.Write()\n h_red_down.Write()\n\n h_irred_up, h_irred_down = create_sys_hist(h_nom_new, h_irreducible_sys_new, \"irreducible_sys\")\n h_irred_up.Write()\n h_irred_down.Write()\n\n h_iso_up, h_iso_down = create_sys_hist(h_nom_new, h_iso_sys, \"isolation_sys\")\n h_iso_up.Write()\n h_iso_down.Write()\n\n fin.Close()\n fout.Close()" ]
[ "0.51993304", "0.5058645", "0.5000408", "0.49947664", "0.49831274", "0.49594375", "0.4924288", "0.49234203", "0.49121162", "0.4839938", "0.48386833", "0.48100945", "0.47855484", "0.47579652", "0.47441375", "0.47049582", "0.4687491", "0.46358106", "0.46306562", "0.4626821", "0.4613285", "0.45710376", "0.4567508", "0.45511374", "0.45368662", "0.45121735", "0.45081973", "0.4488692", "0.44831285", "0.4476393", "0.4467862", "0.44640857", "0.4455054", "0.44288892", "0.44137606", "0.44084302", "0.44077614", "0.43907297", "0.43866572", "0.43838525", "0.43829215", "0.4380234", "0.4373314", "0.4373314", "0.4364172", "0.43497834", "0.43413702", "0.43397835", "0.43393463", "0.43375152", "0.4334893", "0.43329522", "0.43290237", "0.43272546", "0.43248582", "0.43243632", "0.43152937", "0.43062627", "0.4303419", "0.43003693", "0.42947844", "0.42942867", "0.42842686", "0.42768413", "0.427595", "0.42757088", "0.42701563", "0.42686966", "0.42680416", "0.42672873", "0.42663124", "0.4264359", "0.42636", "0.42612547", "0.42501694", "0.4245171", "0.4242754", "0.4241064", "0.42372915", "0.4235205", "0.42306715", "0.42274505", "0.42260268", "0.42231452", "0.4216118", "0.4214229", "0.420762", "0.4204917", "0.42046046", "0.41932362", "0.41836935", "0.41820282", "0.4180952", "0.41785985", "0.4175652", "0.4173935", "0.416873", "0.41663504", "0.4165861", "0.41644675" ]
0.44778198
29
You can call this operation to enable or disable password-free access from the same VPC as an ApsaraDB for MongoDB instance.
def modify_instance_vpc_auth_mode_with_options(
    self,
    request: dds_20151201_models.ModifyInstanceVpcAuthModeRequest,
    runtime: util_models.RuntimeOptions,
) -> dds_20151201_models.ModifyInstanceVpcAuthModeResponse:
    UtilClient.validate_model(request)
    query = {}
    if not UtilClient.is_unset(request.dbinstance_id):
        query['DBInstanceId'] = request.dbinstance_id
    if not UtilClient.is_unset(request.node_id):
        query['NodeId'] = request.node_id
    if not UtilClient.is_unset(request.owner_account):
        query['OwnerAccount'] = request.owner_account
    if not UtilClient.is_unset(request.owner_id):
        query['OwnerId'] = request.owner_id
    if not UtilClient.is_unset(request.resource_owner_account):
        query['ResourceOwnerAccount'] = request.resource_owner_account
    if not UtilClient.is_unset(request.resource_owner_id):
        query['ResourceOwnerId'] = request.resource_owner_id
    if not UtilClient.is_unset(request.security_token):
        query['SecurityToken'] = request.security_token
    if not UtilClient.is_unset(request.vpc_auth_mode):
        query['VpcAuthMode'] = request.vpc_auth_mode
    req = open_api_models.OpenApiRequest(
        query=OpenApiUtilClient.query(query)
    )
    params = open_api_models.Params(
        action='ModifyInstanceVpcAuthMode',
        version='2015-12-01',
        protocol='HTTPS',
        pathname='/',
        method='POST',
        auth_type='AK',
        style='RPC',
        req_body_type='formData',
        body_type='json'
    )
    return TeaCore.from_map(
        dds_20151201_models.ModifyInstanceVpcAuthModeResponse(),
        self.call_api(params, req, runtime)
    )
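For context, a minimal caller-side sketch of how this operation might be invoked is given below. It is not part of the dataset row above: the package import paths follow the usual Alibaba Cloud Python SDK v2 layout, and the endpoint, credentials, instance ID, and the 'Open'/'Close' values for VpcAuthMode are placeholder assumptions, not values confirmed by this document.

# Illustrative sketch only; import paths, endpoint, credentials and the
# 'Open'/'Close' VpcAuthMode values are assumptions, not taken from the row above.
from alibabacloud_dds20151201.client import Client
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util import models as util_models


def toggle_vpc_password_free(instance_id: str, enable: bool) -> None:
    # Placeholder credentials and endpoint; replace with real values.
    config = open_api_models.Config(
        access_key_id='<your-access-key-id>',
        access_key_secret='<your-access-key-secret>',
        endpoint='mongodb.aliyuncs.com',
    )
    client = Client(config)
    request = dds_20151201_models.ModifyInstanceVpcAuthModeRequest(
        dbinstance_id=instance_id,
        # Assumed values: 'Open' enables password-free access inside the VPC,
        # 'Close' disables it; check the API reference before relying on this.
        vpc_auth_mode='Open' if enable else 'Close',
    )
    runtime = util_models.RuntimeOptions()
    # Calls the method shown in the document field above.
    response = client.modify_instance_vpc_auth_mode_with_options(request, runtime)
    print(response.body)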
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def enable_protection(self) -> None:\n await self._request(\n \"dns_config\", method=\"POST\", json_data={\"protection_enabled\": True},\n )", "def enable(ctx):\n\n config_db = ConfigDBConnector()\n config_db.connect()\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"admin_mode\": \"enabled\"})", "def disable(ctx):\n config_db = ConfigDBConnector()\n config_db.connect()\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"admin_mode\": \"disabled\"})", "async def disable_protection(self) -> None:\n await self._request(\n \"dns_config\", method=\"POST\", json_data={\"protection_enabled\": False},\n )", "def disable_aaa_password_restriction(device):\n cmd=\"no aaa password restriction\"\n try:\n device.configure(cmd)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n f'Could not configure no aaa password restriction:\\n{e}'\n )", "def enable_aaa_password_restriction(device):\n cmd=\"aaa password restriction\"\n try:\n device.configure(cmd)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n f'Could not configure aaa password restriction:\\n{e}'\n )", "def enable(self) -> Awaitable[Dict]:\n return self.client.send(\"Security.enable\", {})", "def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n tunnel_info = {}\n tunnel_info['FLEX_COUNTER_STATUS'] = DISABLE\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"TUNNEL\", tunnel_info)", "def enable_ad():\n for host in online_hosts:\n with settings(warn_only=True):\n\n result1 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgActiveDirectory -o cfgADEnable 1\")\n if result1.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Enabling Active Directory failed \")\n\n result2 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgActiveDirectory -o cfgADType 2\")\n if result2.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Setting of standard schema for AD failed \")", "def enable_auth_gssapi(self):\n UseGSSAPI = False\n GSSAPICleanupCredentials = False\n return UseGSSAPI", "def test_disable_virt_realm_remote_access(self):\n pass", "def database_connectivity():\n\n port = \"mongodb://localhost:27017/\"\n client = pymongo.MongoClient(host=port)\n\n db = client[\"Password_Manager\"]\n collection = db[\"Passwords\"]\n\n return collection", "def enable(self) -> Awaitable[Dict]:\n return self.client.send(\"Database.enable\", {})", "def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = 'disable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"PORT\", port_info)", "def enable():\n configdb = ConfigDBConnector()\n configdb.connect()\n tunnel_info = {}\n tunnel_info['FLEX_COUNTER_STATUS'] = ENABLE\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"TUNNEL\", tunnel_info)", "def _encryptDBPass():\n #run encrypt tool on user given password\n controller.CONF[\"ENCRYPTED_DB_PASS\"] = utils.encryptEngineDBPass(password=controller.CONF[\"DB_PASS\"],\n maskList=masked_value_set)", "def enable_acm_fullaccess(self):\n self._request({\"enable-acm-fullaccess\": True})", "def libc_prctl_set_securebits():\n # straight from man capabilities(7):\n # \"An application can use the following call to lock itself, and all of\n # its descendants, into an environment where the only way of gaining\n # capabilities is by executing a program with associated file capabilities\"\n _call_c_style(\n libc,\n \"prctl\",\n PR_SET_SECUREBITS,\n (\n SECBIT_KEEP_CAPS_LOCKED\n | SECBIT_NO_SETUID_FIXUP\n | SECBIT_NO_SETUID_FIXUP_LOCKED\n | 
SECBIT_NOROOT\n | SECBIT_NOROOT_LOCKED\n ),\n 0,\n 0,\n 0,\n )", "def set_all_ports_admin_disabled(self):\n pass", "def disabled(config):\n disable(config)\n reload_service('apache2')", "def install_mongo ( vpc_conn, ec2_conn, cloudwatch_conn, vpc, base_name, aws_account_type, base_topicarn, params ):\n app_type = 'MONGO'\n mongo_keypair = get_keypair_name( aws_account_type, vpc.region.name, app_type )\n print mongo_keypair\n mongo_sec_grp_name = get_secgrp_name( base_name, app_type )\n \n print \"mongo_sec_grp_name\" + mongo_sec_grp_name\n \n mongo_sec_grp = find_secgrp(ec2_conn, mongo_sec_grp_name)\n print mongo_sec_grp\n \n if not mongo_sec_grp :\n mongo_sec_grp = create_secgrp( ec2_conn,\n vpc,\n mongo_sec_grp_name,\n 'Controls access to the ' + app_type )\n print mongo_sec_grp \n \n remove_all_rules( ec2_conn, [ mongo_sec_grp ],deep = True,base_name = base_name)\n grant_ssh_access( ec2_conn, [ mongo_sec_grp ], find_group( ec2_conn, base_name, 'NAT' ) )\n grant_grp_self_access ( ec2_conn, mongo_sec_grp, 0, 65535, protocol = 'tcp' )\n \n mongo_ami_name = base_name + '-' + app_type\n \n mongo_ami = get_ami_by_name( ec2_conn, mongo_ami_name )\n if not mongo_ami :\n mongo_ami = get_ami_by_name( ec2_conn, default_mongo_ami )\n\n print \"Creating mongoDB Instances\"\n \n mongo_subnets = get_vpc_subnets( vpc_conn, vpc, 'STATIC' )\n mongo_sec_grps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ mongo_sec_grp_name ] } )\n \n primary_ip = get_primary_node(mongo_subnets)\n mongo_instances = []\n mongo_config = get_mongo_config(mongo_subnets)\n \n user_data = None\n primary_instance_id = None\n primary_subnet = None\n for subnet in mongo_subnets :\n cidr_block = subnet.cidr_block\n \n if cidr_block == primary_ip :\n primary_subnet = subnet\n \n # First launch the secondary instances\n if cidr_block != primary_ip :\n instance_private_ip = get_static_ip(subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo )\n\n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )\n\n user_data = base64.b64encode(mongo_config)\n \n print \"Launching primary mongo instance\"\n print \"primary_subnet \" +primary_subnet.cidr_block\n instance_private_ip = get_static_ip(primary_subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = primary_subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo)\n \n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )", "def set_enable(self, pwd, type='secret'):\n\n if type == 'secret':\n cmd = 'enable secret %s' %(pwd)\n else:\n cmd = 'enable password %s' %(pwd)\n\n output = 
self.iosapi.bcp_send_config_command(self.iosapi.netmiko_session, cmd)\n self.iosapi.bcp_log(\"info\", \"(%s) set_enable : Attempting to set enable\" %(__name__))\n return(output)", "def set_dcb_admin_mode(self, ports, mode='Enabled'):\n pass", "def disable_aaa_authentication_login(device,auth_list,auth_db1,auth_db2=None):\n cmd = f'no aaa authentication login {auth_list} {auth_db1}'\n if auth_db2:\n cmd += f' {auth_db2}'\n try:\n device.configure(cmd)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n f'Could not configure no aaa authentication login:\\n{e}'\n )", "def disable(self) -> Awaitable[Dict]:\n return self.client.send(\"Database.disable\", {})", "def enabled(config):\n enable(config)\n reload_service('apache2')", "def autodisable_cloud(cloud):\n log.warning(\"Autodisabling %s\", cloud)\n cloud.ctl.disable()\n title = \"Cloud %s has been automatically disabled\" % cloud.name\n message = \"%s after multiple failures to connect to it.\" % title\n notify_user(cloud.owner, title=title, message=message, email_notify=True)", "def enable():\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = ENABLE\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", PORT_BUFFER_DROP, port_info)", "def disable(self) -> Awaitable[Dict]:\n return self.client.send(\"Security.disable\", {})", "def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n fc_info = {}\n fc_info['FLEX_COUNTER_STATUS'] = 'disable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"QUEUE_WATERMARK\", fc_info)\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"PG_WATERMARK\", fc_info)\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", BUFFER_POOL_WATERMARK, fc_info)", "def configure_masked_unmasked_enable_secret_password (device,\n password,\n privilege=None,\n ccp_name=None,\n algorithm_type=None,\n masked=True,\n secret=True,):\n cmd=\"enable \"\n if ccp_name :\n cmd+=f\" common-criteria-policy {ccp_name}\"\n if algorithm_type :\n cmd+=f\" algorithm-type {algorithm_type}\"\n if masked :\n cmd+=\" masked-secret\"\n elif secret :\n cmd+=\" secret\"\n else :\n cmd+=\" password\"\n if privilege:\n cmd+=f\" level {privilege}\"\n if not(masked) :\n cmd+=f\" {password}\"\n\n masked_secret_dialog = Dialog(\n [\n Statement(\n pattern=r\".*Enter secret:.*\",\n action=f\"sendline({password})\",\n loop_continue=True,\n continue_timer=False,\n ),\n Statement(\n pattern=r\".*Confirm secret:.*\",\n action=f\"sendline({password})\",\n loop_continue=True,\n continue_timer=False,\n ),\n ]\n )\n\n try:\n out=device.configure(cmd,reply=masked_secret_dialog)\n if re.search(r'[p|P]assword',out) and not(re.search(r'migrate',out)):\n raise SubCommandFailure(out)\n\n except SubCommandFailure as e:\n raise SubCommandFailure(\n \"Could not configure enable password\"\n \"Error: {error}\".format(error=e)\n )", "def enable():\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = 'enable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"PORT\", port_info)", "def testDenyAllowAccess(self):\n self.host.ContinueAuth()\n self.host.SignIn(self.account['username'], self.account['password'])\n self.host.DenyAccess()\n self.host.ContinueAuth()\n self.host.AllowAccess()", "def enable_autoscaling_readonly(self):\n self._request({\"enable-autoscaling-readonly\": True})", "def disable_pki(client, mount_point=\"pki\"):\n client.sys.disable_secrets_engine(mount_point)", "def enable_acm_readonly(self):\n self._request({\"enable-acm-readonly\": True})", "def 
enable_private_endpoint(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_private_endpoint\")", "def test_disabled(self, monkeypatch):\n monkeypatch.setenv('ENABLE_AUTO_EXPIRE', 'true')\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('user2', 'ldasfkk', 'Inactive', created, last_used)\n key.audit(10, 11, 10, 8)\n assert key.audit_state == 'disabled'\n key.audit(60, 80, 30, 20)\n assert key.audit_state == 'disabled'", "def adb_disabled(self, adb_disabled):\n\n self._adb_disabled = adb_disabled", "def test_disable(self):\n self.assertTrue(self.user1.active)\n self.assertFalse(self.user1.ad_deleted)\n url = '/api/users/{}/'.format(self.user1.ad_guid)\n data = {\n 'Enabled': False,\n }\n response = self.client.put(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 202)\n user = DepartmentUser.objects.get(pk=self.user1.pk) # Refresh from db\n self.assertFalse(user.ad_deleted)\n self.assertFalse(user.active)\n self.assertTrue(user.in_sync)", "def cmd_enable_private(self, argument):\n if self.bot.admins.authenticate(argument):\n self.bot.admins.add(self.nick)\n self.send(self.nick, _(\"User %s added to admins\"), self.nick)\n self.logger.info(\"User %s added to admins\" % self.nick)\n else:\n self.bot.admins.remove(self.nick)\n self.logger.warning(\"User %s tried to elevate privileges with wrong password '%s'\" % (self.nick, argument))", "def unconfigure_enable_password(device,secret=True,privilege=None):\n cmd=\"no enable\"\n if secret :\n cmd+=\" secret\"\n else :\n cmd+=\" password\"\n if privilege :\n cmd+=f\" level {privilege}\"\n\n try:\n device.configure(cmd)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n f'Could not unconfigure enable password or secret:\\n{e}'\n )", "def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = DISABLE\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", PORT_BUFFER_DROP, port_info)", "def test_edit_user_enable_permit_sudo(driver):\n pass", "def disable_authentication():\n cherrypy.request.security = { \"user\" : \"\", \"name\" : \"\", \"roles\": [] }", "def protect_endpoint():\n pass", "def firewallOff():\n pass", "def enable(self):\n\t\tresponse = self.client.post(self._endpoint + \"/enable\")\n\t\treturn bool(response.json[\"success\"])", "def account_api_password_disable(request):\n if request.method != 'POST':\n return render(request, 'agda/account/api_password_disable.html')\n profile = request.user\n profile.set_api_password(None)\n profile.save()\n profile.log_change(request.user, \"Deleted own api password.\")\n messages.success(request, \"Your api password has been disabled.\")\n return redirect(account_edit)", "def enable_auth(self):\n\n self._api_manager.enable_auth()", "def disable_primary_site_administrator(self):\n dURL = self._url + \"/psa/disable\"\n params = {\n \"f\" : \"json\"\n }\n return self._con.post(path=dURL, postdata=params)", "def enable_root(self):\n return self.client.post(self.path+'/root')['user']['password']", "def disable(self):\n\t\tresponse = self.client.post(self._endpoint + \"/disable\")\n\t\treturn bool(response.json[\"success\"])", "def mongo_connect(\n password,\n user='RCD2021',\n dbname='myFirstDatabase'\n):\n\n client = pymongo.MongoClient(f\"mongodb+srv://{user}:{password}\\\n@cluster0.z3tac.mongodb.net/{dbname}?retryWrites=true&w=majority\")\n db = client.test\n\n 
return db", "def disable():\n ret = {}\n result = __salt__[\"cmd.run_all\"](\n \"pfctl -d\", output_loglevel=\"trace\", python_shell=False\n )\n\n if result[\"retcode\"] == 0:\n ret = {\"comment\": \"pf disabled\", \"changes\": True}\n else:\n # If pf was already disabled the return code is also non-zero.\n # Don't raise an exception in that case.\n if result[\"stderr\"] == \"pfctl: pf not enabled\":\n ret = {\"comment\": \"pf already disabled\", \"changes\": False}\n else:\n raise CommandExecutionError(\n \"Could not disable pf\",\n info={\"errors\": [result[\"stderr\"]], \"changes\": False},\n )\n\n return ret", "def secureMySQL(dry=False):\n \n #is default for mysql on ubuntu 14.0.4\n #bind-address = 127.0.0.1\n pass", "def set_protection_enabled(self, c, state):\n self.enable_protection = state", "def enable_dns_management(self):\n self._request({\"enable-dns-management\": True})", "def unconfigure_aaa_authentication_enable(device):\n\n cmd = f'no aaa authentication enable default'\n try:\n device.configure(cmd)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n f'Could not unconfigure aaa authentication enable:\\n{e}'\n )", "def enable_password_policy(self) -> bool:\n return pulumi.get(self, \"enable_password_policy\")", "def remove_auth_backend():\n\n headers = {\"X-Vault-Token\": args.x_vault_token}\n url = \"{0}/sys/auth/{1}\".format(args.vault_url, args.k8s_cluster_name)\n print 'Disabling auth backend for cluster {0}'.format(args.k8s_cluster_name)\n send_delete(url=url, headers=headers)", "def set_password(self, system):\n if system[\"embedded_available\"] and system[\"controller_addresses\"]:\n for url in [\"https://%s:8443/devmgr\" % system[\"controller_addresses\"][0],\n \"https://%s:443/devmgr\" % system[\"controller_addresses\"][0],\n \"http://%s:8080/devmgr\" % system[\"controller_addresses\"][0]]:\n try:\n rc, response = self._request(\"%s/utils/login?uid=admin&xsrf=false&onlycheck=true\" % url, ignore_errors=True, url_username=\"admin\",\n url_password=\"\", validate_certs=False)\n\n if rc == 200: # successful login without password\n system[\"password_set\"] = False\n if system[\"password\"]:\n try:\n rc, storage_system = self._request(\"%s/v2/storage-systems/1/passwords\" % url, method=\"POST\", url_username=\"admin\",\n headers=self.DEFAULT_HEADERS, url_password=\"\", validate_certs=False,\n data=json.dumps({\"currentAdminPassword\": \"\", \"adminPassword\": True,\n \"newPassword\": system[\"password\"]}))\n\n except Exception as error:\n system[\"failed\"] = True\n self.module.warn(\"Failed to set storage system password. Array [%s].\" % system[\"ssid\"])\n break\n\n elif rc == 401: # unauthorized\n system[\"password_set\"] = True\n break\n except Exception as error:\n pass\n else:\n self.module.warn(\"Failed to retrieve array password state. Array [%s].\" % system[\"ssid\"])\n system[\"failed\"] = True", "def enable_auth_backend():\n headers = {\"X-Vault-Token\": args.x_vault_token}\n data = {\"type\": \"kubernetes\"}\n url = \"{0}/sys/auth/{1}\".format(args.vault_url, args.k8s_cluster_name)\n print 'Enabling auth backend of type kubernetes for {0}'.format(args.k8s_cluster_name)\n req = send_post(url=url, data=data, headers=headers, return_output=True)\n if str(req.status_code).startswith('2'):\n print 'SUCCESS! 
{0} {1} {2}'.format(req.status_code, req.reason, req.content)\n else:\n if 'path is already in use' in req.content:\n print 'NOTE: Auth backend already enabled, which means the cluster is already setup on Vault.'\n print 'NOTE: Moving forward to Role creation, which is namespace-based'\n else:\n print 'FAIL! {0} {1} {2}'.format(req.status_code, req.reason, req.content)\n exit(77)", "def cmd_enable(self, app_name=None):\n rc = self.socket_command_with_project('enable', app_name)\n return rc", "def test_enable_virt_realm_remote_access(self):\n pass", "def pg_allow_replication(self, user, password, ip_ranges, restart=True):\n\n # XXX: does not support differing primary/replica pg versions\n self.create_db_user(user, password, replication=True)\n files.uncomment(self.pg_hba, \"local +replication\", use_sudo=True)\n for ip_range in ip_ranges:\n hostssl_line = (\n f\"hostssl replication all {ip_range} {self.pg_pw_encryption}\"\n )\n files.append(self.pg_hba, hostssl_line, use_sudo=True)\n if restart:\n sudo(\"service postgresql restart\")", "def auth_with_guest_security_db(self, sUserName, sUserPassword, nFlags = 0):\n\t\treturn Job(SDK.PrlVm_AuthWithGuestSecurityDb(self.handle, sUserName, sUserPassword, nFlags)[0])", "def enable():\n ret = {}\n result = __salt__[\"cmd.run_all\"](\n \"pfctl -e\", output_loglevel=\"trace\", python_shell=False\n )\n\n if result[\"retcode\"] == 0:\n ret = {\"comment\": \"pf enabled\", \"changes\": True}\n else:\n # If pf was already enabled the return code is also non-zero.\n # Don't raise an exception in that case.\n if result[\"stderr\"] == \"pfctl: pf already enabled\":\n ret = {\"comment\": \"pf already enabled\", \"changes\": False}\n else:\n raise CommandExecutionError(\n \"Could not enable pf\",\n info={\"errors\": [result[\"stderr\"]], \"changes\": False},\n )\n\n return ret", "def setup_volume_access( user_email, volume_name, caps, RG_port, slice_secret, RG_closure=None ):\n client = connect_syndicate()\n \n try:\n rc = ensure_volume_access_right_exists( user_email, volume_name, caps )\n assert rc is True, \"Failed to create access right for %s in %s\" % (user_email, volume_name)\n \n except Exception, e:\n logger.exception(e)\n return False\n \n RG_name = syndicate_provisioning.make_gateway_name( \"OpenCloud\", \"RG\", volume_name, \"localhost\" )\n RG_key_password = syndicate_provisioning.make_gateway_private_key_password( RG_name, slice_secret )\n \n try:\n rc = syndicate_provisioning.ensure_RG_exists( client, user_email, volume_name, RG_name, \"localhost\", RG_port, RG_key_password, closure=RG_closure )\n except Exception, e:\n logger.exception(e)\n return False\n \n return True", "def enable():\n configdb = ConfigDBConnector()\n configdb.connect()\n fc_info = {}\n fc_info['FLEX_COUNTER_STATUS'] = 'enable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"QUEUE_WATERMARK\", fc_info)\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"PG_WATERMARK\", fc_info)\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", BUFFER_POOL_WATERMARK, fc_info)", "def _update_celery_ldap_settings(self, node_roles):\n env.pstat_settings['enable_celery_ldap'] = False\n env.enable_celery_ldap = False\n if 'has_ldap_access' in node_roles:\n logger.info(\"Configuring node to run celery_ldap\")\n env.pstat_settings['enable_celery_ldap'] = True\n env.enable_celery_ldap = True\n return\n\n logger.info(\"Node not configured to run celery_ldap\")", "def test_db_ssl_enable(self):\n\n # Check default state is SSL on\n with mock.patch.dict('os.environ', REQUIRED_SETTINGS, clear=True):\n settings_vars 
= self.reload_settings()\n self.assertEqual(\n settings_vars['DATABASES']['default']['OPTIONS'],\n {'sslmode': 'require'}\n )\n\n # Check enabling the setting explicitly\n with mock.patch.dict('os.environ', {\n **REQUIRED_SETTINGS,\n 'MICROMASTERS_DB_DISABLE_SSL': 'True'\n }, clear=True):\n settings_vars = self.reload_settings()\n self.assertEqual(\n settings_vars['DATABASES']['default']['OPTIONS'],\n {}\n )\n\n # Disable it\n with mock.patch.dict('os.environ', {\n **REQUIRED_SETTINGS,\n 'MICROMASTERS_DB_DISABLE_SSL': 'False'\n }, clear=True):\n settings_vars = self.reload_settings()\n self.assertEqual(\n settings_vars['DATABASES']['default']['OPTIONS'],\n {'sslmode': 'require'}\n )", "def correct_password(username, password, db):\n\tquery = db((db.User.username == username) & (db.User.password == password))\n\treturn query.count() > 0", "def electrolytedb():\n if not check_for_mongodb():\n pytest.skip(\"MongoDB is required\")", "def disable_user(conn, user_dn):\n try:\n conn.modify(str(user_dn), {'userAccountControl': [(MODIFY_REPLACE, ['514'])]})\n except Exception as e:\n raise Exception(\"Can't disable the user :: {}\".format(e))", "def disable_auth(self):\n\n self._api_manager.disable_auth()", "def reconfigure_keystone_to_use_ldap(self):\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"basic_env_for_reconfiguration\")\n cluster_id = self.fuel_web.get_last_created_cluster()\n devops_pr_controller = self.fuel_web.get_nailgun_primary_node(\n self.env.d_env.nodes().slaves[0])\n\n pr_controller = self.fuel_web.get_nailgun_node_by_devops_node(\n devops_pr_controller)\n\n self.show_step(2)\n config = utils.get_config_template('keystone_ldap')\n structured_config = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(\n config,\n cluster_id,\n role=\"controller\")\n\n self.show_step(3)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role=\"controller\")\n\n self.show_step(4)\n self.fuel_web.task_wait(task, timeout=3600, interval=30)\n\n self.show_step(5)\n self.check_config_on_remote([pr_controller], structured_config)\n logger.info(\"New configuration was applied\")\n\n self.env.make_snapshot(\"reconfigure_keystone_to_use_ldap\")", "def enable():\n request = dict(id='gbn')\n _gbn_enable(request)", "def setLoopback(self, enable): \n if enable == True:\n DPxEnableDoutDinLoopback()\n else:\n DPxDisableDoutDinLoopback()", "def create_mongo_user():\n mongo_url = \"mongodb://mongo:mongo@code7-mongo/admin\"\n database = \"code7\"\n username = \"code7\"\n password = \"code7\"\n\n client = pymongo.MongoClient(mongo_url)\n mongo_db = pymongo.database.Database(client, database)\n\n mongo_db.add_user(\n username,\n password=password,\n **{\"roles\": [{\"role\": \"readWrite\", \"db\": database}, {\"role\": \"dbAdmin\", \"db\": database}]}\n )", "def unconfigure_aaa_auth_proxy(device, server_grp):\n try:\n device.configure([\n f\"no aaa authorization auth-proxy default group {server_grp}\"\n ])\n except SubCommandFailure:\n raise SubCommandFailure(\n 'Could not unconfigure AAA auth proxy'\n )", "def getclient(hosts,replicaset,db_name,db_user,db_pwd):\n conn = MongoClient(hosts,replicaSet=replicaset,read_preference=ReadPreference.SECONDARY_PREFERRED)\n conn[db_name].authenticate(db_user,db_pwd,mechanism='SCRAM-SHA-1')\n return conn", "def do_security_setup(run_as_user, branch, base_path, dist_path, enable=True):\n \n if not enable:\n #disable security setup if enabled\n runcmd(\"apt-get -y remove unattended-upgrades fail2ban psad rkhunter 
chkrootkit logwatch apparmor auditd iwatch\")\n return\n \n #modify host.conf\n modify_config(r'^nospoof on$', 'nospoof on', '/etc/host.conf')\n \n #enable automatic security updates\n runcmd(\"apt-get -y install unattended-upgrades\")\n runcmd('''bash -c \"echo -e 'APT::Periodic::Update-Package-Lists \"1\";\\nAPT::Periodic::Unattended-Upgrade \"1\";' > /etc/apt/apt.conf.d/20auto-upgrades\" ''')\n runcmd(\"dpkg-reconfigure -fnoninteractive -plow unattended-upgrades\")\n \n #sysctl\n runcmd(\"install -m 0644 -o root -g root -D %s/linux/other/sysctl_rules.conf /etc/sysctl.d/60-tweaks.conf\" % dist_path)\n\n #set up fail2ban\n runcmd(\"apt-get -y install fail2ban\")\n runcmd(\"install -m 0644 -o root -g root -D %s/linux/other/fail2ban.jail.conf /etc/fail2ban/jail.d/counterblock.conf\" % dist_path)\n runcmd(\"service fail2ban restart\")\n \n #set up psad\n runcmd(\"apt-get -y install psad\")\n modify_config(r'^ENABLE_AUTO_IDS\\s+?N;$', 'ENABLE_AUTO_IDS\\tY;', '/etc/psad/psad.conf')\n modify_config(r'^ENABLE_AUTO_IDS_EMAILS\\s+?Y;$', 'ENABLE_AUTO_IDS_EMAILS\\tN;', '/etc/psad/psad.conf')\n for f in ['/etc/ufw/before.rules', '/etc/ufw/before6.rules']:\n modify_config(r'^# End required lines.*?# allow all on loopback$',\n '# End required lines\\n\\n#CUSTOM: for psad\\n-A INPUT -j LOG\\n-A FORWARD -j LOG\\n\\n# allow all on loopback',\n f, dotall=True)\n runcmd(\"psad -R && psad --sig-update\")\n runcmd(\"service ufw restart\")\n runcmd(\"service psad restart\")\n \n #set up chkrootkit, rkhunter\n runcmd(\"apt-get -y install rkhunter chkrootkit\")\n runcmd('bash -c \"rkhunter --update; exit 0\"')\n runcmd(\"rkhunter --propupd\")\n runcmd('bash -c \"rkhunter --check --sk; exit 0\"')\n runcmd(\"rkhunter --propupd\")\n \n #logwatch\n runcmd(\"apt-get -y install logwatch libdate-manip-perl\")\n \n #apparmor\n runcmd(\"apt-get -y install apparmor apparmor-profiles\")\n \n #auditd\n #note that auditd will need a reboot to fully apply the rules, due to it operating in \"immutable mode\" by default\n runcmd(\"apt-get -y install auditd audispd-plugins\")\n runcmd(\"install -m 0640 -o root -g root -D %s/linux/other/audit.rules /etc/audit/rules.d/counterblock.rules\" % dist_path)\n modify_config(r'^USE_AUGENRULES=.*?$', 'USE_AUGENRULES=\"yes\"', '/etc/default/auditd')\n runcmd(\"service auditd restart\")\n\n #iwatch\n runcmd(\"apt-get -y install iwatch\")\n modify_config(r'^START_DAEMON=.*?$', 'START_DAEMON=true', '/etc/default/iwatch')\n runcmd(\"install -m 0644 -o root -g root -D %s/linux/other/iwatch.xml /etc/iwatch/iwatch.xml\" % dist_path)\n modify_config(r'guard email=\"root@localhost\"', 'guard email=\"noreply@%s\"' % socket.gethostname(), '/etc/iwatch/iwatch.xml')\n runcmd(\"service iwatch restart\")", "def lockdown_procedure():\n\tprint(\"----------\")\n\tprint_section_header(\"LOCKDOWN\", Fore.BLUE)\n\tprint_confirmation(\"Set secure configuration without user interaction.\")\n\n\t# Get sudo priv\n\tsp.run(\"sudo -E -v\", shell=True, stdout=sp.PIPE)\n\n\t####\n\t# FIREWALL\n\t####\n\n\tsp.run(['sudo', 'launchctl', 'load', '/System/Library/LaunchDaemons/com.apple.alf.agent.plist'], stdout=sp.PIPE)\n\tsp.run(['sudo', 'launchctl', 'load', '/System/Library/LaunchAgents/com.apple.alf.useragent.plist'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setglobalstate', 'on'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setloggingmode', 'on'], stdout=sp.PIPE)\n\tsp.run(['sudo', 
'/usr/libexec/ApplicationFirewall/socketfilterfw', '--setstealthmode', 'on'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsigned', 'off'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsignedapp', 'off'], stdout=sp.PIPE)\n\tsp.run(['sudo', 'pkill', '-HUP', 'socketfilterfw'], stdout=sp.PIPE)\n\n\t####\n\t# SYSTEM PROTECTION\n\t####\n\n\tsp.run('sudo spctl --master-enable', shell=True, stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsigned', 'off'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsignedapp', 'off'], stdout=sp.PIPE)\n\tsp.run(['sudo defaults write /Library/Preferences/SystemConfiguration/com.apple.captive.control Active -bool false'], stdout=sp.PIPE)\n\n\t####\n\t# METADATA STORAGE\n\t####\n\n\tsp.run(['rm', '-rfv', '\"~/Library/LanguageModeling/*\"', '\"~/Library/Spelling/*\"', '\"~/Library/Suggestions/*\"'])\n\tsp.run(['rm', '-rfv', '\"~/Library/Application Support/Quick Look/*\"'], stdout=sp.PIPE)\n\tsp.run([':>~/Library/Preferences/com.apple.LaunchServices.QuarantineEventsV2'], shell=True, stdout=sp.PIPE)\n\n\t####\n\t# USER SAFETY\n\t####\n\n\tsp.run(['defaults', 'write', 'com.apple.screensaver', 'askForPassword', '-int', '1'], stdout=sp.PIPE)\n\tsp.run(['defaults', 'write', 'com.apple.screensaver', 'askForPasswordDelay', '-int', '0'], stdout=sp.PIPE)\n\tsp.run(['defaults', 'write', 'NSGlobalDomain', 'AppleShowAllExtensions', '-bool', 'true'], stdout=sp.PIPE)\n\tsp.run(['defaults', 'write', 'NSGlobalDomain', 'NSDocumentSaveNewDocumentsToCloud', '-bool', 'false'], stdout=sp.PIPE)\n\tsp.run(['defaults', 'write', 'com.apple.finder', 'AppleShowAllFiles', '-boolean', 'true'], shell=True, stdout=sp.PIPE)\n\tsp.run(['killAll', 'Finder'], stdout=sp.PIPE)\n\n\t####\n\t# RESTART\n\t####\n\n\tfinal_configuration()", "def fusion_api_enable_pool(self, body, uri, api=None, headers=None):\n return self.idpool.enable(body, uri, api, headers)", "def __init__(self, db_name):\n uri = re.sub('<username>', mongoUser, connection_uri)\n uri = re.sub('<password>', mongoPassword, uri)\n self.client = pymongo.MongoClient(uri)\n # Create your database\n self.db = self.client[db_name]", "def login_mongodb_cloud():\r\n\r\n try:\r\n config.read(config_file)\r\n user = config[\"mongodb_cloud\"][\"user\"]\r\n pw = config[\"mongodb_cloud\"][\"pw\"]\r\n print(f'Got user=***** pw=***** from {config_file}')\r\n except Exception as e:\r\n print(f'Error parsing {config_file}: {e}')\r\n\r\n client = pymongo.MongoClient(f'mongodb+srv://{user}:{pw}'\r\n '@cluster0-np6jb.gcp.mongodb.net/test'\r\n '?retryWrites=true')\r\n\r\n return client", "def give_permissions(self):\n self._activate()\n self.configure(state=\"enabled\")", "def disable_root_login():\n sudo('passwd --lock root')", "def enableOrDisableFeature(self, enable):\n\n validator = LogicalNvdimmValidator()\n\n scalable_pmem_config = ScalablePersistentMemoryConfig(self._restHelpers,\\\n validator, self._chif_lib)\n scalable_pmem_config.refresh()\n\n # pre-validation\n self._helpers.validateFeatureIsSupported(scalable_pmem_config)\n self._helpers.validateFunctionalityIsEnabled(scalable_pmem_config)\n\n if enable is False:\n # If user disables Scalable PMEM, revert any pending changes to\n # prevent data or configuration loss\n if self._rdmc.interactive:\n message = u\"Warning: disabling Scalable Persistent Memory will \"\\\n \"revert any pending 
configuration changes.\\n\"\n self._helpers.confirmChanges(message=message)\n self._restHelpers.revertSettings()\n\n patchAttributes = {\n \"FeatureEnabled\" : enable\n }\n _ = self._restHelpers.patchScalablePmemSettingAttributes(patchAttributes)\n\n sys.stdout.write(u\"\\nThe Scalable Persistent Memory feature has been \"\\\n \"set to: {}\\n\".format(\"Enabled\" if enable else \"Disabled\"))\n\n self._helpers.noticeRestartRequired(scalable_pmem_config)\n\n sys.stdout.write(\"\\n\\n\")", "def enable(ctx):\n\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = ENABLE\n ctx.obj.mod_entry(\"FLEX_COUNTER_TABLE\", PG_DROP, port_info)", "def set_management_https(enabled=True, deploy=False):\n\n if enabled is True:\n value = \"no\"\n elif enabled is False:\n value = \"yes\"\n else:\n raise CommandExecutionError(\n \"Invalid option provided for service enabled option.\"\n )\n\n ret = {}\n\n query = {\n \"type\": \"config\",\n \"action\": \"set\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/service\",\n \"element\": \"<disable-https>{}</disable-https>\".format(value),\n }\n\n ret.update(__proxy__[\"panos.call\"](query))\n\n if deploy is True:\n ret.update(commit())\n\n return ret", "def test_modify_znode(self):\n z = self.test_start_one_value()\n self.client.set(\"/services/db/1.1.1.1\",\n json.dumps({\"enabled\": \"0\"}))\n z.loop(2, timeout=self.TIMEOUT)\n self.conf.write.assert_called_with({\"1.1.1.1\": {\"enabled\": \"0\"}})", "def enable_root_user(self, instance):\n return instance.enable_root_user()", "def mongodb_init(cls, host=\"127.0.0.1\", port=27017, username=\"\", password=\"\", dbname=\"admin\"):\n if username and password:\n uri = \"mongodb://{username}:{password}@{host}:{port}/{dbname}\".format(username=quote_plus(username),\n password=quote_plus(password),\n host=quote_plus(host),\n port=port,\n dbname=dbname)\n else:\n uri = \"mongodb://{host}:{port}/{dbname}\".format(host=host, port=port, dbname=dbname)\n cls._mongo_client = motor.motor_asyncio.AsyncIOMotorClient(uri, connectTimeoutMS=5000, serverSelectionTimeoutMS=5000)\n #LoopRunTask.register(cls._check_connection, 2)\n SingleTask.call_later(cls._check_connection, 2) #模拟串行定时器,避免并发\n logger.info(\"create mongodb connection pool.\")", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):" ]
[ "0.57480425", "0.5616109", "0.55412394", "0.5352928", "0.53207445", "0.52173084", "0.51425046", "0.512292", "0.5096007", "0.50629604", "0.5048997", "0.503538", "0.4975747", "0.49570495", "0.4949226", "0.4946102", "0.49304068", "0.49299124", "0.49176344", "0.4874947", "0.48682445", "0.48331422", "0.483283", "0.48193747", "0.4815568", "0.4813538", "0.48088914", "0.4804145", "0.47997385", "0.4792705", "0.4792298", "0.47736803", "0.47566923", "0.4752925", "0.47513282", "0.47477615", "0.47448245", "0.47365305", "0.4734731", "0.47278857", "0.47272596", "0.47073412", "0.4695234", "0.46950984", "0.46866864", "0.4679198", "0.4678995", "0.46664634", "0.46565065", "0.4646805", "0.46447086", "0.4636189", "0.4634883", "0.46327734", "0.4622032", "0.4621598", "0.46135208", "0.4591871", "0.45851704", "0.4583406", "0.45766675", "0.4573104", "0.45705956", "0.45653665", "0.45541874", "0.4550447", "0.45480707", "0.4539546", "0.4538501", "0.45360494", "0.453458", "0.45296443", "0.45272934", "0.4518018", "0.45094267", "0.44953552", "0.4493863", "0.4490414", "0.44837147", "0.44784543", "0.44688037", "0.44685495", "0.4466653", "0.4459641", "0.44556442", "0.4448464", "0.44423586", "0.44394097", "0.4434472", "0.44344372", "0.4433835", "0.44332463", "0.4426373", "0.44247428", "0.4420346", "0.44164786", "0.44164786", "0.44164786", "0.44164786", "0.44164786", "0.44164786" ]
0.0
-1
You can call this operation to enable or disable password-free access from the same VPC as an ApsaraDB for MongoDB instance.
async def modify_instance_vpc_auth_mode_with_options_async( self, request: dds_20151201_models.ModifyInstanceVpcAuthModeRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.ModifyInstanceVpcAuthModeResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.node_id): query['NodeId'] = request.node_id if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token if not UtilClient.is_unset(request.vpc_auth_mode): query['VpcAuthMode'] = request.vpc_auth_mode req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='ModifyInstanceVpcAuthMode', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.ModifyInstanceVpcAuthModeResponse(), await self.call_api_async(params, req, runtime) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def enable_protection(self) -> None:\n await self._request(\n \"dns_config\", method=\"POST\", json_data={\"protection_enabled\": True},\n )", "def enable(ctx):\n\n config_db = ConfigDBConnector()\n config_db.connect()\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"admin_mode\": \"enabled\"})", "def disable(ctx):\n config_db = ConfigDBConnector()\n config_db.connect()\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"admin_mode\": \"disabled\"})", "async def disable_protection(self) -> None:\n await self._request(\n \"dns_config\", method=\"POST\", json_data={\"protection_enabled\": False},\n )", "def disable_aaa_password_restriction(device):\n cmd=\"no aaa password restriction\"\n try:\n device.configure(cmd)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n f'Could not configure no aaa password restriction:\\n{e}'\n )", "def enable_aaa_password_restriction(device):\n cmd=\"aaa password restriction\"\n try:\n device.configure(cmd)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n f'Could not configure aaa password restriction:\\n{e}'\n )", "def enable(self) -> Awaitable[Dict]:\n return self.client.send(\"Security.enable\", {})", "def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n tunnel_info = {}\n tunnel_info['FLEX_COUNTER_STATUS'] = DISABLE\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"TUNNEL\", tunnel_info)", "def enable_ad():\n for host in online_hosts:\n with settings(warn_only=True):\n\n result1 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgActiveDirectory -o cfgADEnable 1\")\n if result1.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Enabling Active Directory failed \")\n\n result2 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgActiveDirectory -o cfgADType 2\")\n if result2.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Setting of standard schema for AD failed \")", "def enable_auth_gssapi(self):\n UseGSSAPI = False\n GSSAPICleanupCredentials = False\n return UseGSSAPI", "def test_disable_virt_realm_remote_access(self):\n pass", "def database_connectivity():\n\n port = \"mongodb://localhost:27017/\"\n client = pymongo.MongoClient(host=port)\n\n db = client[\"Password_Manager\"]\n collection = db[\"Passwords\"]\n\n return collection", "def enable(self) -> Awaitable[Dict]:\n return self.client.send(\"Database.enable\", {})", "def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = 'disable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"PORT\", port_info)", "def enable():\n configdb = ConfigDBConnector()\n configdb.connect()\n tunnel_info = {}\n tunnel_info['FLEX_COUNTER_STATUS'] = ENABLE\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"TUNNEL\", tunnel_info)", "def _encryptDBPass():\n #run encrypt tool on user given password\n controller.CONF[\"ENCRYPTED_DB_PASS\"] = utils.encryptEngineDBPass(password=controller.CONF[\"DB_PASS\"],\n maskList=masked_value_set)", "def enable_acm_fullaccess(self):\n self._request({\"enable-acm-fullaccess\": True})", "def libc_prctl_set_securebits():\n # straight from man capabilities(7):\n # \"An application can use the following call to lock itself, and all of\n # its descendants, into an environment where the only way of gaining\n # capabilities is by executing a program with associated file capabilities\"\n _call_c_style(\n libc,\n \"prctl\",\n PR_SET_SECUREBITS,\n (\n SECBIT_KEEP_CAPS_LOCKED\n | SECBIT_NO_SETUID_FIXUP\n | SECBIT_NO_SETUID_FIXUP_LOCKED\n | 
SECBIT_NOROOT\n | SECBIT_NOROOT_LOCKED\n ),\n 0,\n 0,\n 0,\n )", "def set_all_ports_admin_disabled(self):\n pass", "def disabled(config):\n disable(config)\n reload_service('apache2')", "def install_mongo ( vpc_conn, ec2_conn, cloudwatch_conn, vpc, base_name, aws_account_type, base_topicarn, params ):\n app_type = 'MONGO'\n mongo_keypair = get_keypair_name( aws_account_type, vpc.region.name, app_type )\n print mongo_keypair\n mongo_sec_grp_name = get_secgrp_name( base_name, app_type )\n \n print \"mongo_sec_grp_name\" + mongo_sec_grp_name\n \n mongo_sec_grp = find_secgrp(ec2_conn, mongo_sec_grp_name)\n print mongo_sec_grp\n \n if not mongo_sec_grp :\n mongo_sec_grp = create_secgrp( ec2_conn,\n vpc,\n mongo_sec_grp_name,\n 'Controls access to the ' + app_type )\n print mongo_sec_grp \n \n remove_all_rules( ec2_conn, [ mongo_sec_grp ],deep = True,base_name = base_name)\n grant_ssh_access( ec2_conn, [ mongo_sec_grp ], find_group( ec2_conn, base_name, 'NAT' ) )\n grant_grp_self_access ( ec2_conn, mongo_sec_grp, 0, 65535, protocol = 'tcp' )\n \n mongo_ami_name = base_name + '-' + app_type\n \n mongo_ami = get_ami_by_name( ec2_conn, mongo_ami_name )\n if not mongo_ami :\n mongo_ami = get_ami_by_name( ec2_conn, default_mongo_ami )\n\n print \"Creating mongoDB Instances\"\n \n mongo_subnets = get_vpc_subnets( vpc_conn, vpc, 'STATIC' )\n mongo_sec_grps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ mongo_sec_grp_name ] } )\n \n primary_ip = get_primary_node(mongo_subnets)\n mongo_instances = []\n mongo_config = get_mongo_config(mongo_subnets)\n \n user_data = None\n primary_instance_id = None\n primary_subnet = None\n for subnet in mongo_subnets :\n cidr_block = subnet.cidr_block\n \n if cidr_block == primary_ip :\n primary_subnet = subnet\n \n # First launch the secondary instances\n if cidr_block != primary_ip :\n instance_private_ip = get_static_ip(subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo )\n\n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )\n\n user_data = base64.b64encode(mongo_config)\n \n print \"Launching primary mongo instance\"\n print \"primary_subnet \" +primary_subnet.cidr_block\n instance_private_ip = get_static_ip(primary_subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = primary_subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo)\n \n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )", "def set_enable(self, pwd, type='secret'):\n\n if type == 'secret':\n cmd = 'enable secret %s' %(pwd)\n else:\n cmd = 'enable password %s' %(pwd)\n\n output = 
self.iosapi.bcp_send_config_command(self.iosapi.netmiko_session, cmd)\n self.iosapi.bcp_log(\"info\", \"(%s) set_enable : Attempting to set enable\" %(__name__))\n return(output)", "def set_dcb_admin_mode(self, ports, mode='Enabled'):\n pass", "def disable_aaa_authentication_login(device,auth_list,auth_db1,auth_db2=None):\n cmd = f'no aaa authentication login {auth_list} {auth_db1}'\n if auth_db2:\n cmd += f' {auth_db2}'\n try:\n device.configure(cmd)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n f'Could not configure no aaa authentication login:\\n{e}'\n )", "def enabled(config):\n enable(config)\n reload_service('apache2')", "def disable(self) -> Awaitable[Dict]:\n return self.client.send(\"Database.disable\", {})", "def autodisable_cloud(cloud):\n log.warning(\"Autodisabling %s\", cloud)\n cloud.ctl.disable()\n title = \"Cloud %s has been automatically disabled\" % cloud.name\n message = \"%s after multiple failures to connect to it.\" % title\n notify_user(cloud.owner, title=title, message=message, email_notify=True)", "def enable():\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = ENABLE\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", PORT_BUFFER_DROP, port_info)", "def disable(self) -> Awaitable[Dict]:\n return self.client.send(\"Security.disable\", {})", "def configure_masked_unmasked_enable_secret_password (device,\n password,\n privilege=None,\n ccp_name=None,\n algorithm_type=None,\n masked=True,\n secret=True,):\n cmd=\"enable \"\n if ccp_name :\n cmd+=f\" common-criteria-policy {ccp_name}\"\n if algorithm_type :\n cmd+=f\" algorithm-type {algorithm_type}\"\n if masked :\n cmd+=\" masked-secret\"\n elif secret :\n cmd+=\" secret\"\n else :\n cmd+=\" password\"\n if privilege:\n cmd+=f\" level {privilege}\"\n if not(masked) :\n cmd+=f\" {password}\"\n\n masked_secret_dialog = Dialog(\n [\n Statement(\n pattern=r\".*Enter secret:.*\",\n action=f\"sendline({password})\",\n loop_continue=True,\n continue_timer=False,\n ),\n Statement(\n pattern=r\".*Confirm secret:.*\",\n action=f\"sendline({password})\",\n loop_continue=True,\n continue_timer=False,\n ),\n ]\n )\n\n try:\n out=device.configure(cmd,reply=masked_secret_dialog)\n if re.search(r'[p|P]assword',out) and not(re.search(r'migrate',out)):\n raise SubCommandFailure(out)\n\n except SubCommandFailure as e:\n raise SubCommandFailure(\n \"Could not configure enable password\"\n \"Error: {error}\".format(error=e)\n )", "def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n fc_info = {}\n fc_info['FLEX_COUNTER_STATUS'] = 'disable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"QUEUE_WATERMARK\", fc_info)\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"PG_WATERMARK\", fc_info)\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", BUFFER_POOL_WATERMARK, fc_info)", "def enable():\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = 'enable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"PORT\", port_info)", "def testDenyAllowAccess(self):\n self.host.ContinueAuth()\n self.host.SignIn(self.account['username'], self.account['password'])\n self.host.DenyAccess()\n self.host.ContinueAuth()\n self.host.AllowAccess()", "def enable_autoscaling_readonly(self):\n self._request({\"enable-autoscaling-readonly\": True})", "def enable_acm_readonly(self):\n self._request({\"enable-acm-readonly\": True})", "def disable_pki(client, mount_point=\"pki\"):\n client.sys.disable_secrets_engine(mount_point)", "def 
enable_private_endpoint(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_private_endpoint\")", "def test_disabled(self, monkeypatch):\n monkeypatch.setenv('ENABLE_AUTO_EXPIRE', 'true')\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('user2', 'ldasfkk', 'Inactive', created, last_used)\n key.audit(10, 11, 10, 8)\n assert key.audit_state == 'disabled'\n key.audit(60, 80, 30, 20)\n assert key.audit_state == 'disabled'", "def adb_disabled(self, adb_disabled):\n\n self._adb_disabled = adb_disabled", "def cmd_enable_private(self, argument):\n if self.bot.admins.authenticate(argument):\n self.bot.admins.add(self.nick)\n self.send(self.nick, _(\"User %s added to admins\"), self.nick)\n self.logger.info(\"User %s added to admins\" % self.nick)\n else:\n self.bot.admins.remove(self.nick)\n self.logger.warning(\"User %s tried to elevate privileges with wrong password '%s'\" % (self.nick, argument))", "def test_disable(self):\n self.assertTrue(self.user1.active)\n self.assertFalse(self.user1.ad_deleted)\n url = '/api/users/{}/'.format(self.user1.ad_guid)\n data = {\n 'Enabled': False,\n }\n response = self.client.put(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 202)\n user = DepartmentUser.objects.get(pk=self.user1.pk) # Refresh from db\n self.assertFalse(user.ad_deleted)\n self.assertFalse(user.active)\n self.assertTrue(user.in_sync)", "def unconfigure_enable_password(device,secret=True,privilege=None):\n cmd=\"no enable\"\n if secret :\n cmd+=\" secret\"\n else :\n cmd+=\" password\"\n if privilege :\n cmd+=f\" level {privilege}\"\n\n try:\n device.configure(cmd)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n f'Could not unconfigure enable password or secret:\\n{e}'\n )", "def test_edit_user_enable_permit_sudo(driver):\n pass", "def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = DISABLE\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", PORT_BUFFER_DROP, port_info)", "def disable_authentication():\n cherrypy.request.security = { \"user\" : \"\", \"name\" : \"\", \"roles\": [] }", "def protect_endpoint():\n pass", "def firewallOff():\n pass", "def enable(self):\n\t\tresponse = self.client.post(self._endpoint + \"/enable\")\n\t\treturn bool(response.json[\"success\"])", "def account_api_password_disable(request):\n if request.method != 'POST':\n return render(request, 'agda/account/api_password_disable.html')\n profile = request.user\n profile.set_api_password(None)\n profile.save()\n profile.log_change(request.user, \"Deleted own api password.\")\n messages.success(request, \"Your api password has been disabled.\")\n return redirect(account_edit)", "def enable_auth(self):\n\n self._api_manager.enable_auth()", "def disable_primary_site_administrator(self):\n dURL = self._url + \"/psa/disable\"\n params = {\n \"f\" : \"json\"\n }\n return self._con.post(path=dURL, postdata=params)", "def enable_root(self):\n return self.client.post(self.path+'/root')['user']['password']", "def mongo_connect(\n password,\n user='RCD2021',\n dbname='myFirstDatabase'\n):\n\n client = pymongo.MongoClient(f\"mongodb+srv://{user}:{password}\\\n@cluster0.z3tac.mongodb.net/{dbname}?retryWrites=true&w=majority\")\n db = client.test\n\n return db", "def disable(self):\n\t\tresponse = self.client.post(self._endpoint + \"/disable\")\n\t\treturn 
bool(response.json[\"success\"])", "def secureMySQL(dry=False):\n \n #is default for mysql on ubuntu 14.0.4\n #bind-address = 127.0.0.1\n pass", "def disable():\n ret = {}\n result = __salt__[\"cmd.run_all\"](\n \"pfctl -d\", output_loglevel=\"trace\", python_shell=False\n )\n\n if result[\"retcode\"] == 0:\n ret = {\"comment\": \"pf disabled\", \"changes\": True}\n else:\n # If pf was already disabled the return code is also non-zero.\n # Don't raise an exception in that case.\n if result[\"stderr\"] == \"pfctl: pf not enabled\":\n ret = {\"comment\": \"pf already disabled\", \"changes\": False}\n else:\n raise CommandExecutionError(\n \"Could not disable pf\",\n info={\"errors\": [result[\"stderr\"]], \"changes\": False},\n )\n\n return ret", "def set_protection_enabled(self, c, state):\n self.enable_protection = state", "def enable_dns_management(self):\n self._request({\"enable-dns-management\": True})", "def unconfigure_aaa_authentication_enable(device):\n\n cmd = f'no aaa authentication enable default'\n try:\n device.configure(cmd)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n f'Could not unconfigure aaa authentication enable:\\n{e}'\n )", "def enable_password_policy(self) -> bool:\n return pulumi.get(self, \"enable_password_policy\")", "def remove_auth_backend():\n\n headers = {\"X-Vault-Token\": args.x_vault_token}\n url = \"{0}/sys/auth/{1}\".format(args.vault_url, args.k8s_cluster_name)\n print 'Disabling auth backend for cluster {0}'.format(args.k8s_cluster_name)\n send_delete(url=url, headers=headers)", "def enable_auth_backend():\n headers = {\"X-Vault-Token\": args.x_vault_token}\n data = {\"type\": \"kubernetes\"}\n url = \"{0}/sys/auth/{1}\".format(args.vault_url, args.k8s_cluster_name)\n print 'Enabling auth backend of type kubernetes for {0}'.format(args.k8s_cluster_name)\n req = send_post(url=url, data=data, headers=headers, return_output=True)\n if str(req.status_code).startswith('2'):\n print 'SUCCESS! {0} {1} {2}'.format(req.status_code, req.reason, req.content)\n else:\n if 'path is already in use' in req.content:\n print 'NOTE: Auth backend already enabled, which means the cluster is already setup on Vault.'\n print 'NOTE: Moving forward to Role creation, which is namespace-based'\n else:\n print 'FAIL! {0} {1} {2}'.format(req.status_code, req.reason, req.content)\n exit(77)", "def set_password(self, system):\n if system[\"embedded_available\"] and system[\"controller_addresses\"]:\n for url in [\"https://%s:8443/devmgr\" % system[\"controller_addresses\"][0],\n \"https://%s:443/devmgr\" % system[\"controller_addresses\"][0],\n \"http://%s:8080/devmgr\" % system[\"controller_addresses\"][0]]:\n try:\n rc, response = self._request(\"%s/utils/login?uid=admin&xsrf=false&onlycheck=true\" % url, ignore_errors=True, url_username=\"admin\",\n url_password=\"\", validate_certs=False)\n\n if rc == 200: # successful login without password\n system[\"password_set\"] = False\n if system[\"password\"]:\n try:\n rc, storage_system = self._request(\"%s/v2/storage-systems/1/passwords\" % url, method=\"POST\", url_username=\"admin\",\n headers=self.DEFAULT_HEADERS, url_password=\"\", validate_certs=False,\n data=json.dumps({\"currentAdminPassword\": \"\", \"adminPassword\": True,\n \"newPassword\": system[\"password\"]}))\n\n except Exception as error:\n system[\"failed\"] = True\n self.module.warn(\"Failed to set storage system password. 
Array [%s].\" % system[\"ssid\"])\n break\n\n elif rc == 401: # unauthorized\n system[\"password_set\"] = True\n break\n except Exception as error:\n pass\n else:\n self.module.warn(\"Failed to retrieve array password state. Array [%s].\" % system[\"ssid\"])\n system[\"failed\"] = True", "def cmd_enable(self, app_name=None):\n rc = self.socket_command_with_project('enable', app_name)\n return rc", "def test_enable_virt_realm_remote_access(self):\n pass", "def pg_allow_replication(self, user, password, ip_ranges, restart=True):\n\n # XXX: does not support differing primary/replica pg versions\n self.create_db_user(user, password, replication=True)\n files.uncomment(self.pg_hba, \"local +replication\", use_sudo=True)\n for ip_range in ip_ranges:\n hostssl_line = (\n f\"hostssl replication all {ip_range} {self.pg_pw_encryption}\"\n )\n files.append(self.pg_hba, hostssl_line, use_sudo=True)\n if restart:\n sudo(\"service postgresql restart\")", "def auth_with_guest_security_db(self, sUserName, sUserPassword, nFlags = 0):\n\t\treturn Job(SDK.PrlVm_AuthWithGuestSecurityDb(self.handle, sUserName, sUserPassword, nFlags)[0])", "def enable():\n ret = {}\n result = __salt__[\"cmd.run_all\"](\n \"pfctl -e\", output_loglevel=\"trace\", python_shell=False\n )\n\n if result[\"retcode\"] == 0:\n ret = {\"comment\": \"pf enabled\", \"changes\": True}\n else:\n # If pf was already enabled the return code is also non-zero.\n # Don't raise an exception in that case.\n if result[\"stderr\"] == \"pfctl: pf already enabled\":\n ret = {\"comment\": \"pf already enabled\", \"changes\": False}\n else:\n raise CommandExecutionError(\n \"Could not enable pf\",\n info={\"errors\": [result[\"stderr\"]], \"changes\": False},\n )\n\n return ret", "def setup_volume_access( user_email, volume_name, caps, RG_port, slice_secret, RG_closure=None ):\n client = connect_syndicate()\n \n try:\n rc = ensure_volume_access_right_exists( user_email, volume_name, caps )\n assert rc is True, \"Failed to create access right for %s in %s\" % (user_email, volume_name)\n \n except Exception, e:\n logger.exception(e)\n return False\n \n RG_name = syndicate_provisioning.make_gateway_name( \"OpenCloud\", \"RG\", volume_name, \"localhost\" )\n RG_key_password = syndicate_provisioning.make_gateway_private_key_password( RG_name, slice_secret )\n \n try:\n rc = syndicate_provisioning.ensure_RG_exists( client, user_email, volume_name, RG_name, \"localhost\", RG_port, RG_key_password, closure=RG_closure )\n except Exception, e:\n logger.exception(e)\n return False\n \n return True", "def enable():\n configdb = ConfigDBConnector()\n configdb.connect()\n fc_info = {}\n fc_info['FLEX_COUNTER_STATUS'] = 'enable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"QUEUE_WATERMARK\", fc_info)\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"PG_WATERMARK\", fc_info)\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", BUFFER_POOL_WATERMARK, fc_info)", "def _update_celery_ldap_settings(self, node_roles):\n env.pstat_settings['enable_celery_ldap'] = False\n env.enable_celery_ldap = False\n if 'has_ldap_access' in node_roles:\n logger.info(\"Configuring node to run celery_ldap\")\n env.pstat_settings['enable_celery_ldap'] = True\n env.enable_celery_ldap = True\n return\n\n logger.info(\"Node not configured to run celery_ldap\")", "def test_db_ssl_enable(self):\n\n # Check default state is SSL on\n with mock.patch.dict('os.environ', REQUIRED_SETTINGS, clear=True):\n settings_vars = self.reload_settings()\n self.assertEqual(\n 
settings_vars['DATABASES']['default']['OPTIONS'],\n {'sslmode': 'require'}\n )\n\n # Check enabling the setting explicitly\n with mock.patch.dict('os.environ', {\n **REQUIRED_SETTINGS,\n 'MICROMASTERS_DB_DISABLE_SSL': 'True'\n }, clear=True):\n settings_vars = self.reload_settings()\n self.assertEqual(\n settings_vars['DATABASES']['default']['OPTIONS'],\n {}\n )\n\n # Disable it\n with mock.patch.dict('os.environ', {\n **REQUIRED_SETTINGS,\n 'MICROMASTERS_DB_DISABLE_SSL': 'False'\n }, clear=True):\n settings_vars = self.reload_settings()\n self.assertEqual(\n settings_vars['DATABASES']['default']['OPTIONS'],\n {'sslmode': 'require'}\n )", "def correct_password(username, password, db):\n\tquery = db((db.User.username == username) & (db.User.password == password))\n\treturn query.count() > 0", "def electrolytedb():\n if not check_for_mongodb():\n pytest.skip(\"MongoDB is required\")", "def disable_user(conn, user_dn):\n try:\n conn.modify(str(user_dn), {'userAccountControl': [(MODIFY_REPLACE, ['514'])]})\n except Exception as e:\n raise Exception(\"Can't disable the user :: {}\".format(e))", "def reconfigure_keystone_to_use_ldap(self):\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"basic_env_for_reconfiguration\")\n cluster_id = self.fuel_web.get_last_created_cluster()\n devops_pr_controller = self.fuel_web.get_nailgun_primary_node(\n self.env.d_env.nodes().slaves[0])\n\n pr_controller = self.fuel_web.get_nailgun_node_by_devops_node(\n devops_pr_controller)\n\n self.show_step(2)\n config = utils.get_config_template('keystone_ldap')\n structured_config = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(\n config,\n cluster_id,\n role=\"controller\")\n\n self.show_step(3)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role=\"controller\")\n\n self.show_step(4)\n self.fuel_web.task_wait(task, timeout=3600, interval=30)\n\n self.show_step(5)\n self.check_config_on_remote([pr_controller], structured_config)\n logger.info(\"New configuration was applied\")\n\n self.env.make_snapshot(\"reconfigure_keystone_to_use_ldap\")", "def disable_auth(self):\n\n self._api_manager.disable_auth()", "def enable():\n request = dict(id='gbn')\n _gbn_enable(request)", "def setLoopback(self, enable): \n if enable == True:\n DPxEnableDoutDinLoopback()\n else:\n DPxDisableDoutDinLoopback()", "def create_mongo_user():\n mongo_url = \"mongodb://mongo:mongo@code7-mongo/admin\"\n database = \"code7\"\n username = \"code7\"\n password = \"code7\"\n\n client = pymongo.MongoClient(mongo_url)\n mongo_db = pymongo.database.Database(client, database)\n\n mongo_db.add_user(\n username,\n password=password,\n **{\"roles\": [{\"role\": \"readWrite\", \"db\": database}, {\"role\": \"dbAdmin\", \"db\": database}]}\n )", "def getclient(hosts,replicaset,db_name,db_user,db_pwd):\n conn = MongoClient(hosts,replicaSet=replicaset,read_preference=ReadPreference.SECONDARY_PREFERRED)\n conn[db_name].authenticate(db_user,db_pwd,mechanism='SCRAM-SHA-1')\n return conn", "def unconfigure_aaa_auth_proxy(device, server_grp):\n try:\n device.configure([\n f\"no aaa authorization auth-proxy default group {server_grp}\"\n ])\n except SubCommandFailure:\n raise SubCommandFailure(\n 'Could not unconfigure AAA auth proxy'\n )", "def do_security_setup(run_as_user, branch, base_path, dist_path, enable=True):\n \n if not enable:\n #disable security setup if enabled\n runcmd(\"apt-get -y remove unattended-upgrades fail2ban psad rkhunter chkrootkit logwatch apparmor auditd iwatch\")\n return\n 
\n #modify host.conf\n modify_config(r'^nospoof on$', 'nospoof on', '/etc/host.conf')\n \n #enable automatic security updates\n runcmd(\"apt-get -y install unattended-upgrades\")\n runcmd('''bash -c \"echo -e 'APT::Periodic::Update-Package-Lists \"1\";\\nAPT::Periodic::Unattended-Upgrade \"1\";' > /etc/apt/apt.conf.d/20auto-upgrades\" ''')\n runcmd(\"dpkg-reconfigure -fnoninteractive -plow unattended-upgrades\")\n \n #sysctl\n runcmd(\"install -m 0644 -o root -g root -D %s/linux/other/sysctl_rules.conf /etc/sysctl.d/60-tweaks.conf\" % dist_path)\n\n #set up fail2ban\n runcmd(\"apt-get -y install fail2ban\")\n runcmd(\"install -m 0644 -o root -g root -D %s/linux/other/fail2ban.jail.conf /etc/fail2ban/jail.d/counterblock.conf\" % dist_path)\n runcmd(\"service fail2ban restart\")\n \n #set up psad\n runcmd(\"apt-get -y install psad\")\n modify_config(r'^ENABLE_AUTO_IDS\\s+?N;$', 'ENABLE_AUTO_IDS\\tY;', '/etc/psad/psad.conf')\n modify_config(r'^ENABLE_AUTO_IDS_EMAILS\\s+?Y;$', 'ENABLE_AUTO_IDS_EMAILS\\tN;', '/etc/psad/psad.conf')\n for f in ['/etc/ufw/before.rules', '/etc/ufw/before6.rules']:\n modify_config(r'^# End required lines.*?# allow all on loopback$',\n '# End required lines\\n\\n#CUSTOM: for psad\\n-A INPUT -j LOG\\n-A FORWARD -j LOG\\n\\n# allow all on loopback',\n f, dotall=True)\n runcmd(\"psad -R && psad --sig-update\")\n runcmd(\"service ufw restart\")\n runcmd(\"service psad restart\")\n \n #set up chkrootkit, rkhunter\n runcmd(\"apt-get -y install rkhunter chkrootkit\")\n runcmd('bash -c \"rkhunter --update; exit 0\"')\n runcmd(\"rkhunter --propupd\")\n runcmd('bash -c \"rkhunter --check --sk; exit 0\"')\n runcmd(\"rkhunter --propupd\")\n \n #logwatch\n runcmd(\"apt-get -y install logwatch libdate-manip-perl\")\n \n #apparmor\n runcmd(\"apt-get -y install apparmor apparmor-profiles\")\n \n #auditd\n #note that auditd will need a reboot to fully apply the rules, due to it operating in \"immutable mode\" by default\n runcmd(\"apt-get -y install auditd audispd-plugins\")\n runcmd(\"install -m 0640 -o root -g root -D %s/linux/other/audit.rules /etc/audit/rules.d/counterblock.rules\" % dist_path)\n modify_config(r'^USE_AUGENRULES=.*?$', 'USE_AUGENRULES=\"yes\"', '/etc/default/auditd')\n runcmd(\"service auditd restart\")\n\n #iwatch\n runcmd(\"apt-get -y install iwatch\")\n modify_config(r'^START_DAEMON=.*?$', 'START_DAEMON=true', '/etc/default/iwatch')\n runcmd(\"install -m 0644 -o root -g root -D %s/linux/other/iwatch.xml /etc/iwatch/iwatch.xml\" % dist_path)\n modify_config(r'guard email=\"root@localhost\"', 'guard email=\"noreply@%s\"' % socket.gethostname(), '/etc/iwatch/iwatch.xml')\n runcmd(\"service iwatch restart\")", "def lockdown_procedure():\n\tprint(\"----------\")\n\tprint_section_header(\"LOCKDOWN\", Fore.BLUE)\n\tprint_confirmation(\"Set secure configuration without user interaction.\")\n\n\t# Get sudo priv\n\tsp.run(\"sudo -E -v\", shell=True, stdout=sp.PIPE)\n\n\t####\n\t# FIREWALL\n\t####\n\n\tsp.run(['sudo', 'launchctl', 'load', '/System/Library/LaunchDaemons/com.apple.alf.agent.plist'], stdout=sp.PIPE)\n\tsp.run(['sudo', 'launchctl', 'load', '/System/Library/LaunchAgents/com.apple.alf.useragent.plist'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setglobalstate', 'on'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setloggingmode', 'on'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setstealthmode', 'on'], 
stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsigned', 'off'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsignedapp', 'off'], stdout=sp.PIPE)\n\tsp.run(['sudo', 'pkill', '-HUP', 'socketfilterfw'], stdout=sp.PIPE)\n\n\t####\n\t# SYSTEM PROTECTION\n\t####\n\n\tsp.run('sudo spctl --master-enable', shell=True, stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsigned', 'off'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsignedapp', 'off'], stdout=sp.PIPE)\n\tsp.run(['sudo defaults write /Library/Preferences/SystemConfiguration/com.apple.captive.control Active -bool false'], stdout=sp.PIPE)\n\n\t####\n\t# METADATA STORAGE\n\t####\n\n\tsp.run(['rm', '-rfv', '\"~/Library/LanguageModeling/*\"', '\"~/Library/Spelling/*\"', '\"~/Library/Suggestions/*\"'])\n\tsp.run(['rm', '-rfv', '\"~/Library/Application Support/Quick Look/*\"'], stdout=sp.PIPE)\n\tsp.run([':>~/Library/Preferences/com.apple.LaunchServices.QuarantineEventsV2'], shell=True, stdout=sp.PIPE)\n\n\t####\n\t# USER SAFETY\n\t####\n\n\tsp.run(['defaults', 'write', 'com.apple.screensaver', 'askForPassword', '-int', '1'], stdout=sp.PIPE)\n\tsp.run(['defaults', 'write', 'com.apple.screensaver', 'askForPasswordDelay', '-int', '0'], stdout=sp.PIPE)\n\tsp.run(['defaults', 'write', 'NSGlobalDomain', 'AppleShowAllExtensions', '-bool', 'true'], stdout=sp.PIPE)\n\tsp.run(['defaults', 'write', 'NSGlobalDomain', 'NSDocumentSaveNewDocumentsToCloud', '-bool', 'false'], stdout=sp.PIPE)\n\tsp.run(['defaults', 'write', 'com.apple.finder', 'AppleShowAllFiles', '-boolean', 'true'], shell=True, stdout=sp.PIPE)\n\tsp.run(['killAll', 'Finder'], stdout=sp.PIPE)\n\n\t####\n\t# RESTART\n\t####\n\n\tfinal_configuration()", "def fusion_api_enable_pool(self, body, uri, api=None, headers=None):\n return self.idpool.enable(body, uri, api, headers)", "def __init__(self, db_name):\n uri = re.sub('<username>', mongoUser, connection_uri)\n uri = re.sub('<password>', mongoPassword, uri)\n self.client = pymongo.MongoClient(uri)\n # Create your database\n self.db = self.client[db_name]", "def login_mongodb_cloud():\r\n\r\n try:\r\n config.read(config_file)\r\n user = config[\"mongodb_cloud\"][\"user\"]\r\n pw = config[\"mongodb_cloud\"][\"pw\"]\r\n print(f'Got user=***** pw=***** from {config_file}')\r\n except Exception as e:\r\n print(f'Error parsing {config_file}: {e}')\r\n\r\n client = pymongo.MongoClient(f'mongodb+srv://{user}:{pw}'\r\n '@cluster0-np6jb.gcp.mongodb.net/test'\r\n '?retryWrites=true')\r\n\r\n return client", "def give_permissions(self):\n self._activate()\n self.configure(state=\"enabled\")", "def enableOrDisableFeature(self, enable):\n\n validator = LogicalNvdimmValidator()\n\n scalable_pmem_config = ScalablePersistentMemoryConfig(self._restHelpers,\\\n validator, self._chif_lib)\n scalable_pmem_config.refresh()\n\n # pre-validation\n self._helpers.validateFeatureIsSupported(scalable_pmem_config)\n self._helpers.validateFunctionalityIsEnabled(scalable_pmem_config)\n\n if enable is False:\n # If user disables Scalable PMEM, revert any pending changes to\n # prevent data or configuration loss\n if self._rdmc.interactive:\n message = u\"Warning: disabling Scalable Persistent Memory will \"\\\n \"revert any pending configuration changes.\\n\"\n self._helpers.confirmChanges(message=message)\n self._restHelpers.revertSettings()\n\n patchAttributes = {\n 
\"FeatureEnabled\" : enable\n }\n _ = self._restHelpers.patchScalablePmemSettingAttributes(patchAttributes)\n\n sys.stdout.write(u\"\\nThe Scalable Persistent Memory feature has been \"\\\n \"set to: {}\\n\".format(\"Enabled\" if enable else \"Disabled\"))\n\n self._helpers.noticeRestartRequired(scalable_pmem_config)\n\n sys.stdout.write(\"\\n\\n\")", "def enable(ctx):\n\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = ENABLE\n ctx.obj.mod_entry(\"FLEX_COUNTER_TABLE\", PG_DROP, port_info)", "def disable_root_login():\n sudo('passwd --lock root')", "def set_management_https(enabled=True, deploy=False):\n\n if enabled is True:\n value = \"no\"\n elif enabled is False:\n value = \"yes\"\n else:\n raise CommandExecutionError(\n \"Invalid option provided for service enabled option.\"\n )\n\n ret = {}\n\n query = {\n \"type\": \"config\",\n \"action\": \"set\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/service\",\n \"element\": \"<disable-https>{}</disable-https>\".format(value),\n }\n\n ret.update(__proxy__[\"panos.call\"](query))\n\n if deploy is True:\n ret.update(commit())\n\n return ret", "def enable_root_user(self, instance):\n return instance.enable_root_user()", "def test_modify_znode(self):\n z = self.test_start_one_value()\n self.client.set(\"/services/db/1.1.1.1\",\n json.dumps({\"enabled\": \"0\"}))\n z.loop(2, timeout=self.TIMEOUT)\n self.conf.write.assert_called_with({\"1.1.1.1\": {\"enabled\": \"0\"}})", "def mongodb_init(cls, host=\"127.0.0.1\", port=27017, username=\"\", password=\"\", dbname=\"admin\"):\n if username and password:\n uri = \"mongodb://{username}:{password}@{host}:{port}/{dbname}\".format(username=quote_plus(username),\n password=quote_plus(password),\n host=quote_plus(host),\n port=port,\n dbname=dbname)\n else:\n uri = \"mongodb://{host}:{port}/{dbname}\".format(host=host, port=port, dbname=dbname)\n cls._mongo_client = motor.motor_asyncio.AsyncIOMotorClient(uri, connectTimeoutMS=5000, serverSelectionTimeoutMS=5000)\n #LoopRunTask.register(cls._check_connection, 2)\n SingleTask.call_later(cls._check_connection, 2) #模拟串行定时器,避免并发\n logger.info(\"create mongodb connection pool.\")", "def enable(self) -> None:", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):" ]
[ "0.5749136", "0.5617548", "0.5538639", "0.53506464", "0.5320414", "0.5219431", "0.5143724", "0.51200616", "0.5097675", "0.5063958", "0.5047001", "0.50354904", "0.49778882", "0.4954366", "0.4950596", "0.49457756", "0.49323213", "0.49296176", "0.4914757", "0.48719049", "0.48681757", "0.48321223", "0.48318127", "0.48194486", "0.48138955", "0.48135105", "0.4808147", "0.48049033", "0.47967356", "0.479095", "0.4790247", "0.47750187", "0.47553006", "0.47551307", "0.47502398", "0.4748606", "0.47433484", "0.47349584", "0.47341573", "0.47263932", "0.47254634", "0.47049856", "0.46948725", "0.46923175", "0.46837372", "0.4676218", "0.46744514", "0.46687132", "0.46531925", "0.46486807", "0.46427575", "0.46364906", "0.46333495", "0.46331012", "0.46203375", "0.46202594", "0.46139884", "0.45932722", "0.45858723", "0.45851964", "0.4574876", "0.45725822", "0.4571267", "0.45669663", "0.45543563", "0.4551498", "0.45483828", "0.4542439", "0.45390692", "0.45375934", "0.45330805", "0.45297235", "0.45273688", "0.4516985", "0.4507814", "0.44941434", "0.44933677", "0.44923759", "0.4483157", "0.4478959", "0.44690946", "0.4468067", "0.44660217", "0.4456899", "0.44545814", "0.4448115", "0.44428164", "0.44411463", "0.443574", "0.44340232", "0.44333753", "0.4432191", "0.44263873", "0.44247696", "0.4419746", "0.44171497", "0.44165114", "0.44165114", "0.44165114", "0.44165114", "0.44165114" ]
0.0
-1
You can call this operation to enable or disable password-free access from the same VPC as an ApsaraDB for MongoDB instance.
def modify_instance_vpc_auth_mode( self, request: dds_20151201_models.ModifyInstanceVpcAuthModeRequest, ) -> dds_20151201_models.ModifyInstanceVpcAuthModeResponse: runtime = util_models.RuntimeOptions() return self.modify_instance_vpc_auth_mode_with_options(request, runtime)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def enable_protection(self) -> None:\n await self._request(\n \"dns_config\", method=\"POST\", json_data={\"protection_enabled\": True},\n )", "def enable(ctx):\n\n config_db = ConfigDBConnector()\n config_db.connect()\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"admin_mode\": \"enabled\"})", "def disable(ctx):\n config_db = ConfigDBConnector()\n config_db.connect()\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"admin_mode\": \"disabled\"})", "async def disable_protection(self) -> None:\n await self._request(\n \"dns_config\", method=\"POST\", json_data={\"protection_enabled\": False},\n )", "def disable_aaa_password_restriction(device):\n cmd=\"no aaa password restriction\"\n try:\n device.configure(cmd)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n f'Could not configure no aaa password restriction:\\n{e}'\n )", "def enable_aaa_password_restriction(device):\n cmd=\"aaa password restriction\"\n try:\n device.configure(cmd)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n f'Could not configure aaa password restriction:\\n{e}'\n )", "def enable(self) -> Awaitable[Dict]:\n return self.client.send(\"Security.enable\", {})", "def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n tunnel_info = {}\n tunnel_info['FLEX_COUNTER_STATUS'] = DISABLE\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"TUNNEL\", tunnel_info)", "def enable_ad():\n for host in online_hosts:\n with settings(warn_only=True):\n\n result1 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgActiveDirectory -o cfgADEnable 1\")\n if result1.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Enabling Active Directory failed \")\n\n result2 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgActiveDirectory -o cfgADType 2\")\n if result2.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Setting of standard schema for AD failed \")", "def enable_auth_gssapi(self):\n UseGSSAPI = False\n GSSAPICleanupCredentials = False\n return UseGSSAPI", "def test_disable_virt_realm_remote_access(self):\n pass", "def database_connectivity():\n\n port = \"mongodb://localhost:27017/\"\n client = pymongo.MongoClient(host=port)\n\n db = client[\"Password_Manager\"]\n collection = db[\"Passwords\"]\n\n return collection", "def enable(self) -> Awaitable[Dict]:\n return self.client.send(\"Database.enable\", {})", "def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = 'disable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"PORT\", port_info)", "def enable():\n configdb = ConfigDBConnector()\n configdb.connect()\n tunnel_info = {}\n tunnel_info['FLEX_COUNTER_STATUS'] = ENABLE\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"TUNNEL\", tunnel_info)", "def _encryptDBPass():\n #run encrypt tool on user given password\n controller.CONF[\"ENCRYPTED_DB_PASS\"] = utils.encryptEngineDBPass(password=controller.CONF[\"DB_PASS\"],\n maskList=masked_value_set)", "def enable_acm_fullaccess(self):\n self._request({\"enable-acm-fullaccess\": True})", "def libc_prctl_set_securebits():\n # straight from man capabilities(7):\n # \"An application can use the following call to lock itself, and all of\n # its descendants, into an environment where the only way of gaining\n # capabilities is by executing a program with associated file capabilities\"\n _call_c_style(\n libc,\n \"prctl\",\n PR_SET_SECUREBITS,\n (\n SECBIT_KEEP_CAPS_LOCKED\n | SECBIT_NO_SETUID_FIXUP\n | SECBIT_NO_SETUID_FIXUP_LOCKED\n | 
SECBIT_NOROOT\n | SECBIT_NOROOT_LOCKED\n ),\n 0,\n 0,\n 0,\n )", "def set_all_ports_admin_disabled(self):\n pass", "def disabled(config):\n disable(config)\n reload_service('apache2')", "def install_mongo ( vpc_conn, ec2_conn, cloudwatch_conn, vpc, base_name, aws_account_type, base_topicarn, params ):\n app_type = 'MONGO'\n mongo_keypair = get_keypair_name( aws_account_type, vpc.region.name, app_type )\n print mongo_keypair\n mongo_sec_grp_name = get_secgrp_name( base_name, app_type )\n \n print \"mongo_sec_grp_name\" + mongo_sec_grp_name\n \n mongo_sec_grp = find_secgrp(ec2_conn, mongo_sec_grp_name)\n print mongo_sec_grp\n \n if not mongo_sec_grp :\n mongo_sec_grp = create_secgrp( ec2_conn,\n vpc,\n mongo_sec_grp_name,\n 'Controls access to the ' + app_type )\n print mongo_sec_grp \n \n remove_all_rules( ec2_conn, [ mongo_sec_grp ],deep = True,base_name = base_name)\n grant_ssh_access( ec2_conn, [ mongo_sec_grp ], find_group( ec2_conn, base_name, 'NAT' ) )\n grant_grp_self_access ( ec2_conn, mongo_sec_grp, 0, 65535, protocol = 'tcp' )\n \n mongo_ami_name = base_name + '-' + app_type\n \n mongo_ami = get_ami_by_name( ec2_conn, mongo_ami_name )\n if not mongo_ami :\n mongo_ami = get_ami_by_name( ec2_conn, default_mongo_ami )\n\n print \"Creating mongoDB Instances\"\n \n mongo_subnets = get_vpc_subnets( vpc_conn, vpc, 'STATIC' )\n mongo_sec_grps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ mongo_sec_grp_name ] } )\n \n primary_ip = get_primary_node(mongo_subnets)\n mongo_instances = []\n mongo_config = get_mongo_config(mongo_subnets)\n \n user_data = None\n primary_instance_id = None\n primary_subnet = None\n for subnet in mongo_subnets :\n cidr_block = subnet.cidr_block\n \n if cidr_block == primary_ip :\n primary_subnet = subnet\n \n # First launch the secondary instances\n if cidr_block != primary_ip :\n instance_private_ip = get_static_ip(subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo )\n\n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )\n\n user_data = base64.b64encode(mongo_config)\n \n print \"Launching primary mongo instance\"\n print \"primary_subnet \" +primary_subnet.cidr_block\n instance_private_ip = get_static_ip(primary_subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = primary_subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo)\n \n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )", "def set_enable(self, pwd, type='secret'):\n\n if type == 'secret':\n cmd = 'enable secret %s' %(pwd)\n else:\n cmd = 'enable password %s' %(pwd)\n\n output = 
self.iosapi.bcp_send_config_command(self.iosapi.netmiko_session, cmd)\n self.iosapi.bcp_log(\"info\", \"(%s) set_enable : Attempting to set enable\" %(__name__))\n return(output)", "def set_dcb_admin_mode(self, ports, mode='Enabled'):\n pass", "def disable_aaa_authentication_login(device,auth_list,auth_db1,auth_db2=None):\n cmd = f'no aaa authentication login {auth_list} {auth_db1}'\n if auth_db2:\n cmd += f' {auth_db2}'\n try:\n device.configure(cmd)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n f'Could not configure no aaa authentication login:\\n{e}'\n )", "def disable(self) -> Awaitable[Dict]:\n return self.client.send(\"Database.disable\", {})", "def enabled(config):\n enable(config)\n reload_service('apache2')", "def autodisable_cloud(cloud):\n log.warning(\"Autodisabling %s\", cloud)\n cloud.ctl.disable()\n title = \"Cloud %s has been automatically disabled\" % cloud.name\n message = \"%s after multiple failures to connect to it.\" % title\n notify_user(cloud.owner, title=title, message=message, email_notify=True)", "def enable():\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = ENABLE\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", PORT_BUFFER_DROP, port_info)", "def disable(self) -> Awaitable[Dict]:\n return self.client.send(\"Security.disable\", {})", "def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n fc_info = {}\n fc_info['FLEX_COUNTER_STATUS'] = 'disable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"QUEUE_WATERMARK\", fc_info)\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"PG_WATERMARK\", fc_info)\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", BUFFER_POOL_WATERMARK, fc_info)", "def configure_masked_unmasked_enable_secret_password (device,\n password,\n privilege=None,\n ccp_name=None,\n algorithm_type=None,\n masked=True,\n secret=True,):\n cmd=\"enable \"\n if ccp_name :\n cmd+=f\" common-criteria-policy {ccp_name}\"\n if algorithm_type :\n cmd+=f\" algorithm-type {algorithm_type}\"\n if masked :\n cmd+=\" masked-secret\"\n elif secret :\n cmd+=\" secret\"\n else :\n cmd+=\" password\"\n if privilege:\n cmd+=f\" level {privilege}\"\n if not(masked) :\n cmd+=f\" {password}\"\n\n masked_secret_dialog = Dialog(\n [\n Statement(\n pattern=r\".*Enter secret:.*\",\n action=f\"sendline({password})\",\n loop_continue=True,\n continue_timer=False,\n ),\n Statement(\n pattern=r\".*Confirm secret:.*\",\n action=f\"sendline({password})\",\n loop_continue=True,\n continue_timer=False,\n ),\n ]\n )\n\n try:\n out=device.configure(cmd,reply=masked_secret_dialog)\n if re.search(r'[p|P]assword',out) and not(re.search(r'migrate',out)):\n raise SubCommandFailure(out)\n\n except SubCommandFailure as e:\n raise SubCommandFailure(\n \"Could not configure enable password\"\n \"Error: {error}\".format(error=e)\n )", "def enable():\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = 'enable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"PORT\", port_info)", "def testDenyAllowAccess(self):\n self.host.ContinueAuth()\n self.host.SignIn(self.account['username'], self.account['password'])\n self.host.DenyAccess()\n self.host.ContinueAuth()\n self.host.AllowAccess()", "def enable_autoscaling_readonly(self):\n self._request({\"enable-autoscaling-readonly\": True})", "def disable_pki(client, mount_point=\"pki\"):\n client.sys.disable_secrets_engine(mount_point)", "def enable_acm_readonly(self):\n self._request({\"enable-acm-readonly\": True})", "def 
enable_private_endpoint(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_private_endpoint\")", "def test_disabled(self, monkeypatch):\n monkeypatch.setenv('ENABLE_AUTO_EXPIRE', 'true')\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('user2', 'ldasfkk', 'Inactive', created, last_used)\n key.audit(10, 11, 10, 8)\n assert key.audit_state == 'disabled'\n key.audit(60, 80, 30, 20)\n assert key.audit_state == 'disabled'", "def adb_disabled(self, adb_disabled):\n\n self._adb_disabled = adb_disabled", "def test_disable(self):\n self.assertTrue(self.user1.active)\n self.assertFalse(self.user1.ad_deleted)\n url = '/api/users/{}/'.format(self.user1.ad_guid)\n data = {\n 'Enabled': False,\n }\n response = self.client.put(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 202)\n user = DepartmentUser.objects.get(pk=self.user1.pk) # Refresh from db\n self.assertFalse(user.ad_deleted)\n self.assertFalse(user.active)\n self.assertTrue(user.in_sync)", "def cmd_enable_private(self, argument):\n if self.bot.admins.authenticate(argument):\n self.bot.admins.add(self.nick)\n self.send(self.nick, _(\"User %s added to admins\"), self.nick)\n self.logger.info(\"User %s added to admins\" % self.nick)\n else:\n self.bot.admins.remove(self.nick)\n self.logger.warning(\"User %s tried to elevate privileges with wrong password '%s'\" % (self.nick, argument))", "def unconfigure_enable_password(device,secret=True,privilege=None):\n cmd=\"no enable\"\n if secret :\n cmd+=\" secret\"\n else :\n cmd+=\" password\"\n if privilege :\n cmd+=f\" level {privilege}\"\n\n try:\n device.configure(cmd)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n f'Could not unconfigure enable password or secret:\\n{e}'\n )", "def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = DISABLE\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", PORT_BUFFER_DROP, port_info)", "def test_edit_user_enable_permit_sudo(driver):\n pass", "def disable_authentication():\n cherrypy.request.security = { \"user\" : \"\", \"name\" : \"\", \"roles\": [] }", "def protect_endpoint():\n pass", "def firewallOff():\n pass", "def enable(self):\n\t\tresponse = self.client.post(self._endpoint + \"/enable\")\n\t\treturn bool(response.json[\"success\"])", "def account_api_password_disable(request):\n if request.method != 'POST':\n return render(request, 'agda/account/api_password_disable.html')\n profile = request.user\n profile.set_api_password(None)\n profile.save()\n profile.log_change(request.user, \"Deleted own api password.\")\n messages.success(request, \"Your api password has been disabled.\")\n return redirect(account_edit)", "def enable_auth(self):\n\n self._api_manager.enable_auth()", "def disable_primary_site_administrator(self):\n dURL = self._url + \"/psa/disable\"\n params = {\n \"f\" : \"json\"\n }\n return self._con.post(path=dURL, postdata=params)", "def enable_root(self):\n return self.client.post(self.path+'/root')['user']['password']", "def disable(self):\n\t\tresponse = self.client.post(self._endpoint + \"/disable\")\n\t\treturn bool(response.json[\"success\"])", "def mongo_connect(\n password,\n user='RCD2021',\n dbname='myFirstDatabase'\n):\n\n client = pymongo.MongoClient(f\"mongodb+srv://{user}:{password}\\\n@cluster0.z3tac.mongodb.net/{dbname}?retryWrites=true&w=majority\")\n db = client.test\n\n 
return db", "def disable():\n ret = {}\n result = __salt__[\"cmd.run_all\"](\n \"pfctl -d\", output_loglevel=\"trace\", python_shell=False\n )\n\n if result[\"retcode\"] == 0:\n ret = {\"comment\": \"pf disabled\", \"changes\": True}\n else:\n # If pf was already disabled the return code is also non-zero.\n # Don't raise an exception in that case.\n if result[\"stderr\"] == \"pfctl: pf not enabled\":\n ret = {\"comment\": \"pf already disabled\", \"changes\": False}\n else:\n raise CommandExecutionError(\n \"Could not disable pf\",\n info={\"errors\": [result[\"stderr\"]], \"changes\": False},\n )\n\n return ret", "def secureMySQL(dry=False):\n \n #is default for mysql on ubuntu 14.0.4\n #bind-address = 127.0.0.1\n pass", "def set_protection_enabled(self, c, state):\n self.enable_protection = state", "def enable_dns_management(self):\n self._request({\"enable-dns-management\": True})", "def unconfigure_aaa_authentication_enable(device):\n\n cmd = f'no aaa authentication enable default'\n try:\n device.configure(cmd)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n f'Could not unconfigure aaa authentication enable:\\n{e}'\n )", "def enable_password_policy(self) -> bool:\n return pulumi.get(self, \"enable_password_policy\")", "def remove_auth_backend():\n\n headers = {\"X-Vault-Token\": args.x_vault_token}\n url = \"{0}/sys/auth/{1}\".format(args.vault_url, args.k8s_cluster_name)\n print 'Disabling auth backend for cluster {0}'.format(args.k8s_cluster_name)\n send_delete(url=url, headers=headers)", "def set_password(self, system):\n if system[\"embedded_available\"] and system[\"controller_addresses\"]:\n for url in [\"https://%s:8443/devmgr\" % system[\"controller_addresses\"][0],\n \"https://%s:443/devmgr\" % system[\"controller_addresses\"][0],\n \"http://%s:8080/devmgr\" % system[\"controller_addresses\"][0]]:\n try:\n rc, response = self._request(\"%s/utils/login?uid=admin&xsrf=false&onlycheck=true\" % url, ignore_errors=True, url_username=\"admin\",\n url_password=\"\", validate_certs=False)\n\n if rc == 200: # successful login without password\n system[\"password_set\"] = False\n if system[\"password\"]:\n try:\n rc, storage_system = self._request(\"%s/v2/storage-systems/1/passwords\" % url, method=\"POST\", url_username=\"admin\",\n headers=self.DEFAULT_HEADERS, url_password=\"\", validate_certs=False,\n data=json.dumps({\"currentAdminPassword\": \"\", \"adminPassword\": True,\n \"newPassword\": system[\"password\"]}))\n\n except Exception as error:\n system[\"failed\"] = True\n self.module.warn(\"Failed to set storage system password. Array [%s].\" % system[\"ssid\"])\n break\n\n elif rc == 401: # unauthorized\n system[\"password_set\"] = True\n break\n except Exception as error:\n pass\n else:\n self.module.warn(\"Failed to retrieve array password state. Array [%s].\" % system[\"ssid\"])\n system[\"failed\"] = True", "def enable_auth_backend():\n headers = {\"X-Vault-Token\": args.x_vault_token}\n data = {\"type\": \"kubernetes\"}\n url = \"{0}/sys/auth/{1}\".format(args.vault_url, args.k8s_cluster_name)\n print 'Enabling auth backend of type kubernetes for {0}'.format(args.k8s_cluster_name)\n req = send_post(url=url, data=data, headers=headers, return_output=True)\n if str(req.status_code).startswith('2'):\n print 'SUCCESS! 
{0} {1} {2}'.format(req.status_code, req.reason, req.content)\n else:\n if 'path is already in use' in req.content:\n print 'NOTE: Auth backend already enabled, which means the cluster is already setup on Vault.'\n print 'NOTE: Moving forward to Role creation, which is namespace-based'\n else:\n print 'FAIL! {0} {1} {2}'.format(req.status_code, req.reason, req.content)\n exit(77)", "def cmd_enable(self, app_name=None):\n rc = self.socket_command_with_project('enable', app_name)\n return rc", "def test_enable_virt_realm_remote_access(self):\n pass", "def pg_allow_replication(self, user, password, ip_ranges, restart=True):\n\n # XXX: does not support differing primary/replica pg versions\n self.create_db_user(user, password, replication=True)\n files.uncomment(self.pg_hba, \"local +replication\", use_sudo=True)\n for ip_range in ip_ranges:\n hostssl_line = (\n f\"hostssl replication all {ip_range} {self.pg_pw_encryption}\"\n )\n files.append(self.pg_hba, hostssl_line, use_sudo=True)\n if restart:\n sudo(\"service postgresql restart\")", "def auth_with_guest_security_db(self, sUserName, sUserPassword, nFlags = 0):\n\t\treturn Job(SDK.PrlVm_AuthWithGuestSecurityDb(self.handle, sUserName, sUserPassword, nFlags)[0])", "def enable():\n ret = {}\n result = __salt__[\"cmd.run_all\"](\n \"pfctl -e\", output_loglevel=\"trace\", python_shell=False\n )\n\n if result[\"retcode\"] == 0:\n ret = {\"comment\": \"pf enabled\", \"changes\": True}\n else:\n # If pf was already enabled the return code is also non-zero.\n # Don't raise an exception in that case.\n if result[\"stderr\"] == \"pfctl: pf already enabled\":\n ret = {\"comment\": \"pf already enabled\", \"changes\": False}\n else:\n raise CommandExecutionError(\n \"Could not enable pf\",\n info={\"errors\": [result[\"stderr\"]], \"changes\": False},\n )\n\n return ret", "def setup_volume_access( user_email, volume_name, caps, RG_port, slice_secret, RG_closure=None ):\n client = connect_syndicate()\n \n try:\n rc = ensure_volume_access_right_exists( user_email, volume_name, caps )\n assert rc is True, \"Failed to create access right for %s in %s\" % (user_email, volume_name)\n \n except Exception, e:\n logger.exception(e)\n return False\n \n RG_name = syndicate_provisioning.make_gateway_name( \"OpenCloud\", \"RG\", volume_name, \"localhost\" )\n RG_key_password = syndicate_provisioning.make_gateway_private_key_password( RG_name, slice_secret )\n \n try:\n rc = syndicate_provisioning.ensure_RG_exists( client, user_email, volume_name, RG_name, \"localhost\", RG_port, RG_key_password, closure=RG_closure )\n except Exception, e:\n logger.exception(e)\n return False\n \n return True", "def enable():\n configdb = ConfigDBConnector()\n configdb.connect()\n fc_info = {}\n fc_info['FLEX_COUNTER_STATUS'] = 'enable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"QUEUE_WATERMARK\", fc_info)\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"PG_WATERMARK\", fc_info)\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", BUFFER_POOL_WATERMARK, fc_info)", "def _update_celery_ldap_settings(self, node_roles):\n env.pstat_settings['enable_celery_ldap'] = False\n env.enable_celery_ldap = False\n if 'has_ldap_access' in node_roles:\n logger.info(\"Configuring node to run celery_ldap\")\n env.pstat_settings['enable_celery_ldap'] = True\n env.enable_celery_ldap = True\n return\n\n logger.info(\"Node not configured to run celery_ldap\")", "def test_db_ssl_enable(self):\n\n # Check default state is SSL on\n with mock.patch.dict('os.environ', REQUIRED_SETTINGS, clear=True):\n settings_vars 
= self.reload_settings()\n self.assertEqual(\n settings_vars['DATABASES']['default']['OPTIONS'],\n {'sslmode': 'require'}\n )\n\n # Check enabling the setting explicitly\n with mock.patch.dict('os.environ', {\n **REQUIRED_SETTINGS,\n 'MICROMASTERS_DB_DISABLE_SSL': 'True'\n }, clear=True):\n settings_vars = self.reload_settings()\n self.assertEqual(\n settings_vars['DATABASES']['default']['OPTIONS'],\n {}\n )\n\n # Disable it\n with mock.patch.dict('os.environ', {\n **REQUIRED_SETTINGS,\n 'MICROMASTERS_DB_DISABLE_SSL': 'False'\n }, clear=True):\n settings_vars = self.reload_settings()\n self.assertEqual(\n settings_vars['DATABASES']['default']['OPTIONS'],\n {'sslmode': 'require'}\n )", "def correct_password(username, password, db):\n\tquery = db((db.User.username == username) & (db.User.password == password))\n\treturn query.count() > 0", "def electrolytedb():\n if not check_for_mongodb():\n pytest.skip(\"MongoDB is required\")", "def disable_user(conn, user_dn):\n try:\n conn.modify(str(user_dn), {'userAccountControl': [(MODIFY_REPLACE, ['514'])]})\n except Exception as e:\n raise Exception(\"Can't disable the user :: {}\".format(e))", "def disable_auth(self):\n\n self._api_manager.disable_auth()", "def reconfigure_keystone_to_use_ldap(self):\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"basic_env_for_reconfiguration\")\n cluster_id = self.fuel_web.get_last_created_cluster()\n devops_pr_controller = self.fuel_web.get_nailgun_primary_node(\n self.env.d_env.nodes().slaves[0])\n\n pr_controller = self.fuel_web.get_nailgun_node_by_devops_node(\n devops_pr_controller)\n\n self.show_step(2)\n config = utils.get_config_template('keystone_ldap')\n structured_config = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(\n config,\n cluster_id,\n role=\"controller\")\n\n self.show_step(3)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role=\"controller\")\n\n self.show_step(4)\n self.fuel_web.task_wait(task, timeout=3600, interval=30)\n\n self.show_step(5)\n self.check_config_on_remote([pr_controller], structured_config)\n logger.info(\"New configuration was applied\")\n\n self.env.make_snapshot(\"reconfigure_keystone_to_use_ldap\")", "def enable():\n request = dict(id='gbn')\n _gbn_enable(request)", "def setLoopback(self, enable): \n if enable == True:\n DPxEnableDoutDinLoopback()\n else:\n DPxDisableDoutDinLoopback()", "def create_mongo_user():\n mongo_url = \"mongodb://mongo:mongo@code7-mongo/admin\"\n database = \"code7\"\n username = \"code7\"\n password = \"code7\"\n\n client = pymongo.MongoClient(mongo_url)\n mongo_db = pymongo.database.Database(client, database)\n\n mongo_db.add_user(\n username,\n password=password,\n **{\"roles\": [{\"role\": \"readWrite\", \"db\": database}, {\"role\": \"dbAdmin\", \"db\": database}]}\n )", "def unconfigure_aaa_auth_proxy(device, server_grp):\n try:\n device.configure([\n f\"no aaa authorization auth-proxy default group {server_grp}\"\n ])\n except SubCommandFailure:\n raise SubCommandFailure(\n 'Could not unconfigure AAA auth proxy'\n )", "def getclient(hosts,replicaset,db_name,db_user,db_pwd):\n conn = MongoClient(hosts,replicaSet=replicaset,read_preference=ReadPreference.SECONDARY_PREFERRED)\n conn[db_name].authenticate(db_user,db_pwd,mechanism='SCRAM-SHA-1')\n return conn", "def do_security_setup(run_as_user, branch, base_path, dist_path, enable=True):\n \n if not enable:\n #disable security setup if enabled\n runcmd(\"apt-get -y remove unattended-upgrades fail2ban psad rkhunter 
chkrootkit logwatch apparmor auditd iwatch\")\n return\n \n #modify host.conf\n modify_config(r'^nospoof on$', 'nospoof on', '/etc/host.conf')\n \n #enable automatic security updates\n runcmd(\"apt-get -y install unattended-upgrades\")\n runcmd('''bash -c \"echo -e 'APT::Periodic::Update-Package-Lists \"1\";\\nAPT::Periodic::Unattended-Upgrade \"1\";' > /etc/apt/apt.conf.d/20auto-upgrades\" ''')\n runcmd(\"dpkg-reconfigure -fnoninteractive -plow unattended-upgrades\")\n \n #sysctl\n runcmd(\"install -m 0644 -o root -g root -D %s/linux/other/sysctl_rules.conf /etc/sysctl.d/60-tweaks.conf\" % dist_path)\n\n #set up fail2ban\n runcmd(\"apt-get -y install fail2ban\")\n runcmd(\"install -m 0644 -o root -g root -D %s/linux/other/fail2ban.jail.conf /etc/fail2ban/jail.d/counterblock.conf\" % dist_path)\n runcmd(\"service fail2ban restart\")\n \n #set up psad\n runcmd(\"apt-get -y install psad\")\n modify_config(r'^ENABLE_AUTO_IDS\\s+?N;$', 'ENABLE_AUTO_IDS\\tY;', '/etc/psad/psad.conf')\n modify_config(r'^ENABLE_AUTO_IDS_EMAILS\\s+?Y;$', 'ENABLE_AUTO_IDS_EMAILS\\tN;', '/etc/psad/psad.conf')\n for f in ['/etc/ufw/before.rules', '/etc/ufw/before6.rules']:\n modify_config(r'^# End required lines.*?# allow all on loopback$',\n '# End required lines\\n\\n#CUSTOM: for psad\\n-A INPUT -j LOG\\n-A FORWARD -j LOG\\n\\n# allow all on loopback',\n f, dotall=True)\n runcmd(\"psad -R && psad --sig-update\")\n runcmd(\"service ufw restart\")\n runcmd(\"service psad restart\")\n \n #set up chkrootkit, rkhunter\n runcmd(\"apt-get -y install rkhunter chkrootkit\")\n runcmd('bash -c \"rkhunter --update; exit 0\"')\n runcmd(\"rkhunter --propupd\")\n runcmd('bash -c \"rkhunter --check --sk; exit 0\"')\n runcmd(\"rkhunter --propupd\")\n \n #logwatch\n runcmd(\"apt-get -y install logwatch libdate-manip-perl\")\n \n #apparmor\n runcmd(\"apt-get -y install apparmor apparmor-profiles\")\n \n #auditd\n #note that auditd will need a reboot to fully apply the rules, due to it operating in \"immutable mode\" by default\n runcmd(\"apt-get -y install auditd audispd-plugins\")\n runcmd(\"install -m 0640 -o root -g root -D %s/linux/other/audit.rules /etc/audit/rules.d/counterblock.rules\" % dist_path)\n modify_config(r'^USE_AUGENRULES=.*?$', 'USE_AUGENRULES=\"yes\"', '/etc/default/auditd')\n runcmd(\"service auditd restart\")\n\n #iwatch\n runcmd(\"apt-get -y install iwatch\")\n modify_config(r'^START_DAEMON=.*?$', 'START_DAEMON=true', '/etc/default/iwatch')\n runcmd(\"install -m 0644 -o root -g root -D %s/linux/other/iwatch.xml /etc/iwatch/iwatch.xml\" % dist_path)\n modify_config(r'guard email=\"root@localhost\"', 'guard email=\"noreply@%s\"' % socket.gethostname(), '/etc/iwatch/iwatch.xml')\n runcmd(\"service iwatch restart\")", "def lockdown_procedure():\n\tprint(\"----------\")\n\tprint_section_header(\"LOCKDOWN\", Fore.BLUE)\n\tprint_confirmation(\"Set secure configuration without user interaction.\")\n\n\t# Get sudo priv\n\tsp.run(\"sudo -E -v\", shell=True, stdout=sp.PIPE)\n\n\t####\n\t# FIREWALL\n\t####\n\n\tsp.run(['sudo', 'launchctl', 'load', '/System/Library/LaunchDaemons/com.apple.alf.agent.plist'], stdout=sp.PIPE)\n\tsp.run(['sudo', 'launchctl', 'load', '/System/Library/LaunchAgents/com.apple.alf.useragent.plist'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setglobalstate', 'on'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setloggingmode', 'on'], stdout=sp.PIPE)\n\tsp.run(['sudo', 
'/usr/libexec/ApplicationFirewall/socketfilterfw', '--setstealthmode', 'on'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsigned', 'off'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsignedapp', 'off'], stdout=sp.PIPE)\n\tsp.run(['sudo', 'pkill', '-HUP', 'socketfilterfw'], stdout=sp.PIPE)\n\n\t####\n\t# SYSTEM PROTECTION\n\t####\n\n\tsp.run('sudo spctl --master-enable', shell=True, stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsigned', 'off'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsignedapp', 'off'], stdout=sp.PIPE)\n\tsp.run(['sudo defaults write /Library/Preferences/SystemConfiguration/com.apple.captive.control Active -bool false'], stdout=sp.PIPE)\n\n\t####\n\t# METADATA STORAGE\n\t####\n\n\tsp.run(['rm', '-rfv', '\"~/Library/LanguageModeling/*\"', '\"~/Library/Spelling/*\"', '\"~/Library/Suggestions/*\"'])\n\tsp.run(['rm', '-rfv', '\"~/Library/Application Support/Quick Look/*\"'], stdout=sp.PIPE)\n\tsp.run([':>~/Library/Preferences/com.apple.LaunchServices.QuarantineEventsV2'], shell=True, stdout=sp.PIPE)\n\n\t####\n\t# USER SAFETY\n\t####\n\n\tsp.run(['defaults', 'write', 'com.apple.screensaver', 'askForPassword', '-int', '1'], stdout=sp.PIPE)\n\tsp.run(['defaults', 'write', 'com.apple.screensaver', 'askForPasswordDelay', '-int', '0'], stdout=sp.PIPE)\n\tsp.run(['defaults', 'write', 'NSGlobalDomain', 'AppleShowAllExtensions', '-bool', 'true'], stdout=sp.PIPE)\n\tsp.run(['defaults', 'write', 'NSGlobalDomain', 'NSDocumentSaveNewDocumentsToCloud', '-bool', 'false'], stdout=sp.PIPE)\n\tsp.run(['defaults', 'write', 'com.apple.finder', 'AppleShowAllFiles', '-boolean', 'true'], shell=True, stdout=sp.PIPE)\n\tsp.run(['killAll', 'Finder'], stdout=sp.PIPE)\n\n\t####\n\t# RESTART\n\t####\n\n\tfinal_configuration()", "def fusion_api_enable_pool(self, body, uri, api=None, headers=None):\n return self.idpool.enable(body, uri, api, headers)", "def __init__(self, db_name):\n uri = re.sub('<username>', mongoUser, connection_uri)\n uri = re.sub('<password>', mongoPassword, uri)\n self.client = pymongo.MongoClient(uri)\n # Create your database\n self.db = self.client[db_name]", "def login_mongodb_cloud():\r\n\r\n try:\r\n config.read(config_file)\r\n user = config[\"mongodb_cloud\"][\"user\"]\r\n pw = config[\"mongodb_cloud\"][\"pw\"]\r\n print(f'Got user=***** pw=***** from {config_file}')\r\n except Exception as e:\r\n print(f'Error parsing {config_file}: {e}')\r\n\r\n client = pymongo.MongoClient(f'mongodb+srv://{user}:{pw}'\r\n '@cluster0-np6jb.gcp.mongodb.net/test'\r\n '?retryWrites=true')\r\n\r\n return client", "def give_permissions(self):\n self._activate()\n self.configure(state=\"enabled\")", "def disable_root_login():\n sudo('passwd --lock root')", "def enableOrDisableFeature(self, enable):\n\n validator = LogicalNvdimmValidator()\n\n scalable_pmem_config = ScalablePersistentMemoryConfig(self._restHelpers,\\\n validator, self._chif_lib)\n scalable_pmem_config.refresh()\n\n # pre-validation\n self._helpers.validateFeatureIsSupported(scalable_pmem_config)\n self._helpers.validateFunctionalityIsEnabled(scalable_pmem_config)\n\n if enable is False:\n # If user disables Scalable PMEM, revert any pending changes to\n # prevent data or configuration loss\n if self._rdmc.interactive:\n message = u\"Warning: disabling Scalable Persistent Memory will \"\\\n \"revert any pending 
configuration changes.\\n\"\n self._helpers.confirmChanges(message=message)\n self._restHelpers.revertSettings()\n\n patchAttributes = {\n \"FeatureEnabled\" : enable\n }\n _ = self._restHelpers.patchScalablePmemSettingAttributes(patchAttributes)\n\n sys.stdout.write(u\"\\nThe Scalable Persistent Memory feature has been \"\\\n \"set to: {}\\n\".format(\"Enabled\" if enable else \"Disabled\"))\n\n self._helpers.noticeRestartRequired(scalable_pmem_config)\n\n sys.stdout.write(\"\\n\\n\")", "def enable(ctx):\n\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = ENABLE\n ctx.obj.mod_entry(\"FLEX_COUNTER_TABLE\", PG_DROP, port_info)", "def set_management_https(enabled=True, deploy=False):\n\n if enabled is True:\n value = \"no\"\n elif enabled is False:\n value = \"yes\"\n else:\n raise CommandExecutionError(\n \"Invalid option provided for service enabled option.\"\n )\n\n ret = {}\n\n query = {\n \"type\": \"config\",\n \"action\": \"set\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/service\",\n \"element\": \"<disable-https>{}</disable-https>\".format(value),\n }\n\n ret.update(__proxy__[\"panos.call\"](query))\n\n if deploy is True:\n ret.update(commit())\n\n return ret", "def test_modify_znode(self):\n z = self.test_start_one_value()\n self.client.set(\"/services/db/1.1.1.1\",\n json.dumps({\"enabled\": \"0\"}))\n z.loop(2, timeout=self.TIMEOUT)\n self.conf.write.assert_called_with({\"1.1.1.1\": {\"enabled\": \"0\"}})", "def enable_root_user(self, instance):\n return instance.enable_root_user()", "def mongodb_init(cls, host=\"127.0.0.1\", port=27017, username=\"\", password=\"\", dbname=\"admin\"):\n if username and password:\n uri = \"mongodb://{username}:{password}@{host}:{port}/{dbname}\".format(username=quote_plus(username),\n password=quote_plus(password),\n host=quote_plus(host),\n port=port,\n dbname=dbname)\n else:\n uri = \"mongodb://{host}:{port}/{dbname}\".format(host=host, port=port, dbname=dbname)\n cls._mongo_client = motor.motor_asyncio.AsyncIOMotorClient(uri, connectTimeoutMS=5000, serverSelectionTimeoutMS=5000)\n #LoopRunTask.register(cls._check_connection, 2)\n SingleTask.call_later(cls._check_connection, 2) #模拟串行定时器,避免并发\n logger.info(\"create mongodb connection pool.\")", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):" ]
[ "0.57480425", "0.5616109", "0.55412394", "0.5352928", "0.53207445", "0.52173084", "0.51425046", "0.512292", "0.5096007", "0.50629604", "0.5048997", "0.503538", "0.4975747", "0.49570495", "0.4949226", "0.4946102", "0.49304068", "0.49299124", "0.49176344", "0.4874947", "0.48682445", "0.48331422", "0.483283", "0.48193747", "0.4815568", "0.4813538", "0.48088914", "0.4804145", "0.47997385", "0.4792705", "0.4792298", "0.47736803", "0.47566923", "0.4752925", "0.47513282", "0.47477615", "0.47448245", "0.47365305", "0.4734731", "0.47278857", "0.47272596", "0.47073412", "0.4695234", "0.46950984", "0.46866864", "0.4679198", "0.4678995", "0.46664634", "0.46565065", "0.4646805", "0.46447086", "0.4636189", "0.4634883", "0.46327734", "0.4622032", "0.4621598", "0.46135208", "0.4591871", "0.45851704", "0.4583406", "0.45766675", "0.4573104", "0.45705956", "0.45653665", "0.45541874", "0.4550447", "0.45480707", "0.4539546", "0.4538501", "0.45360494", "0.453458", "0.45296443", "0.45272934", "0.4518018", "0.45094267", "0.44953552", "0.4493863", "0.4490414", "0.44837147", "0.44784543", "0.44688037", "0.44685495", "0.4466653", "0.4459641", "0.44556442", "0.4448464", "0.44423586", "0.44394097", "0.4434472", "0.44344372", "0.4433835", "0.44332463", "0.4426373", "0.44247428", "0.4420346", "0.44164786", "0.44164786", "0.44164786", "0.44164786", "0.44164786", "0.44164786" ]
0.0
-1
You can call this operation to enable or disable password-free access from the same VPC as an ApsaraDB for MongoDB instance.
async def modify_instance_vpc_auth_mode_async( self, request: dds_20151201_models.ModifyInstanceVpcAuthModeRequest, ) -> dds_20151201_models.ModifyInstanceVpcAuthModeResponse: runtime = util_models.RuntimeOptions() return await self.modify_instance_vpc_auth_mode_with_options_async(request, runtime)
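For context on the record above: the document is a thin async wrapper that builds default RuntimeOptions and delegates to modify_instance_vpc_auth_mode_with_options_async; the with_options variant exists so callers can override runtime behavior (timeouts, retries) when needed. A minimal usage sketch follows. It is not part of the dataset record, and the import paths, client class, credential placeholders, and the request field names (dbinstance_id, vpc_auth_mode) are assumptions based on the published alibabacloud_dds20151201 SDK conventions rather than anything stated in this document.

# Minimal usage sketch (assumed SDK layout; not part of the dataset record).
import asyncio

from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_dds20151201.client import Client  # assumed import path
from alibabacloud_dds20151201 import models as dds_20151201_models


async def main() -> None:
    # Placeholder credentials and region; replace with real values.
    config = open_api_models.Config(
        access_key_id='<access-key-id>',
        access_key_secret='<access-key-secret>',
        region_id='cn-hangzhou',
    )
    client = Client(config)

    # Field names are assumed from the SDK's snake_case convention;
    # VpcAuthMode is assumed to take 'Open' (enable password-free access)
    # or 'Close' (disable it). The instance ID is a placeholder.
    request = dds_20151201_models.ModifyInstanceVpcAuthModeRequest(
        dbinstance_id='dds-bp1xxxxxxxxxxxxx',
        vpc_auth_mode='Open',
    )
    response = await client.modify_instance_vpc_auth_mode_async(request)
    print(response.body)


if __name__ == '__main__':
    asyncio.run(main())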
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def enable_protection(self) -> None:\n await self._request(\n \"dns_config\", method=\"POST\", json_data={\"protection_enabled\": True},\n )", "def enable(ctx):\n\n config_db = ConfigDBConnector()\n config_db.connect()\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"admin_mode\": \"enabled\"})", "def disable(ctx):\n config_db = ConfigDBConnector()\n config_db.connect()\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"admin_mode\": \"disabled\"})", "async def disable_protection(self) -> None:\n await self._request(\n \"dns_config\", method=\"POST\", json_data={\"protection_enabled\": False},\n )", "def disable_aaa_password_restriction(device):\n cmd=\"no aaa password restriction\"\n try:\n device.configure(cmd)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n f'Could not configure no aaa password restriction:\\n{e}'\n )", "def enable_aaa_password_restriction(device):\n cmd=\"aaa password restriction\"\n try:\n device.configure(cmd)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n f'Could not configure aaa password restriction:\\n{e}'\n )", "def enable(self) -> Awaitable[Dict]:\n return self.client.send(\"Security.enable\", {})", "def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n tunnel_info = {}\n tunnel_info['FLEX_COUNTER_STATUS'] = DISABLE\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"TUNNEL\", tunnel_info)", "def enable_ad():\n for host in online_hosts:\n with settings(warn_only=True):\n\n result1 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgActiveDirectory -o cfgADEnable 1\")\n if result1.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Enabling Active Directory failed \")\n\n result2 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgActiveDirectory -o cfgADType 2\")\n if result2.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Setting of standard schema for AD failed \")", "def enable_auth_gssapi(self):\n UseGSSAPI = False\n GSSAPICleanupCredentials = False\n return UseGSSAPI", "def test_disable_virt_realm_remote_access(self):\n pass", "def database_connectivity():\n\n port = \"mongodb://localhost:27017/\"\n client = pymongo.MongoClient(host=port)\n\n db = client[\"Password_Manager\"]\n collection = db[\"Passwords\"]\n\n return collection", "def enable(self) -> Awaitable[Dict]:\n return self.client.send(\"Database.enable\", {})", "def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = 'disable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"PORT\", port_info)", "def enable():\n configdb = ConfigDBConnector()\n configdb.connect()\n tunnel_info = {}\n tunnel_info['FLEX_COUNTER_STATUS'] = ENABLE\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"TUNNEL\", tunnel_info)", "def _encryptDBPass():\n #run encrypt tool on user given password\n controller.CONF[\"ENCRYPTED_DB_PASS\"] = utils.encryptEngineDBPass(password=controller.CONF[\"DB_PASS\"],\n maskList=masked_value_set)", "def enable_acm_fullaccess(self):\n self._request({\"enable-acm-fullaccess\": True})", "def libc_prctl_set_securebits():\n # straight from man capabilities(7):\n # \"An application can use the following call to lock itself, and all of\n # its descendants, into an environment where the only way of gaining\n # capabilities is by executing a program with associated file capabilities\"\n _call_c_style(\n libc,\n \"prctl\",\n PR_SET_SECUREBITS,\n (\n SECBIT_KEEP_CAPS_LOCKED\n | SECBIT_NO_SETUID_FIXUP\n | SECBIT_NO_SETUID_FIXUP_LOCKED\n | 
SECBIT_NOROOT\n | SECBIT_NOROOT_LOCKED\n ),\n 0,\n 0,\n 0,\n )", "def set_all_ports_admin_disabled(self):\n pass", "def disabled(config):\n disable(config)\n reload_service('apache2')", "def install_mongo ( vpc_conn, ec2_conn, cloudwatch_conn, vpc, base_name, aws_account_type, base_topicarn, params ):\n app_type = 'MONGO'\n mongo_keypair = get_keypair_name( aws_account_type, vpc.region.name, app_type )\n print mongo_keypair\n mongo_sec_grp_name = get_secgrp_name( base_name, app_type )\n \n print \"mongo_sec_grp_name\" + mongo_sec_grp_name\n \n mongo_sec_grp = find_secgrp(ec2_conn, mongo_sec_grp_name)\n print mongo_sec_grp\n \n if not mongo_sec_grp :\n mongo_sec_grp = create_secgrp( ec2_conn,\n vpc,\n mongo_sec_grp_name,\n 'Controls access to the ' + app_type )\n print mongo_sec_grp \n \n remove_all_rules( ec2_conn, [ mongo_sec_grp ],deep = True,base_name = base_name)\n grant_ssh_access( ec2_conn, [ mongo_sec_grp ], find_group( ec2_conn, base_name, 'NAT' ) )\n grant_grp_self_access ( ec2_conn, mongo_sec_grp, 0, 65535, protocol = 'tcp' )\n \n mongo_ami_name = base_name + '-' + app_type\n \n mongo_ami = get_ami_by_name( ec2_conn, mongo_ami_name )\n if not mongo_ami :\n mongo_ami = get_ami_by_name( ec2_conn, default_mongo_ami )\n\n print \"Creating mongoDB Instances\"\n \n mongo_subnets = get_vpc_subnets( vpc_conn, vpc, 'STATIC' )\n mongo_sec_grps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ mongo_sec_grp_name ] } )\n \n primary_ip = get_primary_node(mongo_subnets)\n mongo_instances = []\n mongo_config = get_mongo_config(mongo_subnets)\n \n user_data = None\n primary_instance_id = None\n primary_subnet = None\n for subnet in mongo_subnets :\n cidr_block = subnet.cidr_block\n \n if cidr_block == primary_ip :\n primary_subnet = subnet\n \n # First launch the secondary instances\n if cidr_block != primary_ip :\n instance_private_ip = get_static_ip(subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo )\n\n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )\n\n user_data = base64.b64encode(mongo_config)\n \n print \"Launching primary mongo instance\"\n print \"primary_subnet \" +primary_subnet.cidr_block\n instance_private_ip = get_static_ip(primary_subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = primary_subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo)\n \n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )", "def set_enable(self, pwd, type='secret'):\n\n if type == 'secret':\n cmd = 'enable secret %s' %(pwd)\n else:\n cmd = 'enable password %s' %(pwd)\n\n output = 
self.iosapi.bcp_send_config_command(self.iosapi.netmiko_session, cmd)\n self.iosapi.bcp_log(\"info\", \"(%s) set_enable : Attempting to set enable\" %(__name__))\n return(output)", "def set_dcb_admin_mode(self, ports, mode='Enabled'):\n pass", "def disable_aaa_authentication_login(device,auth_list,auth_db1,auth_db2=None):\n cmd = f'no aaa authentication login {auth_list} {auth_db1}'\n if auth_db2:\n cmd += f' {auth_db2}'\n try:\n device.configure(cmd)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n f'Could not configure no aaa authentication login:\\n{e}'\n )", "def enabled(config):\n enable(config)\n reload_service('apache2')", "def disable(self) -> Awaitable[Dict]:\n return self.client.send(\"Database.disable\", {})", "def autodisable_cloud(cloud):\n log.warning(\"Autodisabling %s\", cloud)\n cloud.ctl.disable()\n title = \"Cloud %s has been automatically disabled\" % cloud.name\n message = \"%s after multiple failures to connect to it.\" % title\n notify_user(cloud.owner, title=title, message=message, email_notify=True)", "def enable():\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = ENABLE\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", PORT_BUFFER_DROP, port_info)", "def disable(self) -> Awaitable[Dict]:\n return self.client.send(\"Security.disable\", {})", "def configure_masked_unmasked_enable_secret_password (device,\n password,\n privilege=None,\n ccp_name=None,\n algorithm_type=None,\n masked=True,\n secret=True,):\n cmd=\"enable \"\n if ccp_name :\n cmd+=f\" common-criteria-policy {ccp_name}\"\n if algorithm_type :\n cmd+=f\" algorithm-type {algorithm_type}\"\n if masked :\n cmd+=\" masked-secret\"\n elif secret :\n cmd+=\" secret\"\n else :\n cmd+=\" password\"\n if privilege:\n cmd+=f\" level {privilege}\"\n if not(masked) :\n cmd+=f\" {password}\"\n\n masked_secret_dialog = Dialog(\n [\n Statement(\n pattern=r\".*Enter secret:.*\",\n action=f\"sendline({password})\",\n loop_continue=True,\n continue_timer=False,\n ),\n Statement(\n pattern=r\".*Confirm secret:.*\",\n action=f\"sendline({password})\",\n loop_continue=True,\n continue_timer=False,\n ),\n ]\n )\n\n try:\n out=device.configure(cmd,reply=masked_secret_dialog)\n if re.search(r'[p|P]assword',out) and not(re.search(r'migrate',out)):\n raise SubCommandFailure(out)\n\n except SubCommandFailure as e:\n raise SubCommandFailure(\n \"Could not configure enable password\"\n \"Error: {error}\".format(error=e)\n )", "def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n fc_info = {}\n fc_info['FLEX_COUNTER_STATUS'] = 'disable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"QUEUE_WATERMARK\", fc_info)\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"PG_WATERMARK\", fc_info)\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", BUFFER_POOL_WATERMARK, fc_info)", "def enable():\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = 'enable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"PORT\", port_info)", "def enable_autoscaling_readonly(self):\n self._request({\"enable-autoscaling-readonly\": True})", "def testDenyAllowAccess(self):\n self.host.ContinueAuth()\n self.host.SignIn(self.account['username'], self.account['password'])\n self.host.DenyAccess()\n self.host.ContinueAuth()\n self.host.AllowAccess()", "def enable_acm_readonly(self):\n self._request({\"enable-acm-readonly\": True})", "def disable_pki(client, mount_point=\"pki\"):\n client.sys.disable_secrets_engine(mount_point)", "def 
enable_private_endpoint(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_private_endpoint\")", "def test_disabled(self, monkeypatch):\n monkeypatch.setenv('ENABLE_AUTO_EXPIRE', 'true')\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('user2', 'ldasfkk', 'Inactive', created, last_used)\n key.audit(10, 11, 10, 8)\n assert key.audit_state == 'disabled'\n key.audit(60, 80, 30, 20)\n assert key.audit_state == 'disabled'", "def adb_disabled(self, adb_disabled):\n\n self._adb_disabled = adb_disabled", "def cmd_enable_private(self, argument):\n if self.bot.admins.authenticate(argument):\n self.bot.admins.add(self.nick)\n self.send(self.nick, _(\"User %s added to admins\"), self.nick)\n self.logger.info(\"User %s added to admins\" % self.nick)\n else:\n self.bot.admins.remove(self.nick)\n self.logger.warning(\"User %s tried to elevate privileges with wrong password '%s'\" % (self.nick, argument))", "def test_disable(self):\n self.assertTrue(self.user1.active)\n self.assertFalse(self.user1.ad_deleted)\n url = '/api/users/{}/'.format(self.user1.ad_guid)\n data = {\n 'Enabled': False,\n }\n response = self.client.put(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 202)\n user = DepartmentUser.objects.get(pk=self.user1.pk) # Refresh from db\n self.assertFalse(user.ad_deleted)\n self.assertFalse(user.active)\n self.assertTrue(user.in_sync)", "def unconfigure_enable_password(device,secret=True,privilege=None):\n cmd=\"no enable\"\n if secret :\n cmd+=\" secret\"\n else :\n cmd+=\" password\"\n if privilege :\n cmd+=f\" level {privilege}\"\n\n try:\n device.configure(cmd)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n f'Could not unconfigure enable password or secret:\\n{e}'\n )", "def test_edit_user_enable_permit_sudo(driver):\n pass", "def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = DISABLE\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", PORT_BUFFER_DROP, port_info)", "def disable_authentication():\n cherrypy.request.security = { \"user\" : \"\", \"name\" : \"\", \"roles\": [] }", "def protect_endpoint():\n pass", "def firewallOff():\n pass", "def enable(self):\n\t\tresponse = self.client.post(self._endpoint + \"/enable\")\n\t\treturn bool(response.json[\"success\"])", "def account_api_password_disable(request):\n if request.method != 'POST':\n return render(request, 'agda/account/api_password_disable.html')\n profile = request.user\n profile.set_api_password(None)\n profile.save()\n profile.log_change(request.user, \"Deleted own api password.\")\n messages.success(request, \"Your api password has been disabled.\")\n return redirect(account_edit)", "def enable_auth(self):\n\n self._api_manager.enable_auth()", "def disable_primary_site_administrator(self):\n dURL = self._url + \"/psa/disable\"\n params = {\n \"f\" : \"json\"\n }\n return self._con.post(path=dURL, postdata=params)", "def enable_root(self):\n return self.client.post(self.path+'/root')['user']['password']", "def mongo_connect(\n password,\n user='RCD2021',\n dbname='myFirstDatabase'\n):\n\n client = pymongo.MongoClient(f\"mongodb+srv://{user}:{password}\\\n@cluster0.z3tac.mongodb.net/{dbname}?retryWrites=true&w=majority\")\n db = client.test\n\n return db", "def disable(self):\n\t\tresponse = self.client.post(self._endpoint + \"/disable\")\n\t\treturn 
bool(response.json[\"success\"])", "def secureMySQL(dry=False):\n \n #is default for mysql on ubuntu 14.0.4\n #bind-address = 127.0.0.1\n pass", "def disable():\n ret = {}\n result = __salt__[\"cmd.run_all\"](\n \"pfctl -d\", output_loglevel=\"trace\", python_shell=False\n )\n\n if result[\"retcode\"] == 0:\n ret = {\"comment\": \"pf disabled\", \"changes\": True}\n else:\n # If pf was already disabled the return code is also non-zero.\n # Don't raise an exception in that case.\n if result[\"stderr\"] == \"pfctl: pf not enabled\":\n ret = {\"comment\": \"pf already disabled\", \"changes\": False}\n else:\n raise CommandExecutionError(\n \"Could not disable pf\",\n info={\"errors\": [result[\"stderr\"]], \"changes\": False},\n )\n\n return ret", "def set_protection_enabled(self, c, state):\n self.enable_protection = state", "def enable_dns_management(self):\n self._request({\"enable-dns-management\": True})", "def unconfigure_aaa_authentication_enable(device):\n\n cmd = f'no aaa authentication enable default'\n try:\n device.configure(cmd)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n f'Could not unconfigure aaa authentication enable:\\n{e}'\n )", "def enable_password_policy(self) -> bool:\n return pulumi.get(self, \"enable_password_policy\")", "def remove_auth_backend():\n\n headers = {\"X-Vault-Token\": args.x_vault_token}\n url = \"{0}/sys/auth/{1}\".format(args.vault_url, args.k8s_cluster_name)\n print 'Disabling auth backend for cluster {0}'.format(args.k8s_cluster_name)\n send_delete(url=url, headers=headers)", "def set_password(self, system):\n if system[\"embedded_available\"] and system[\"controller_addresses\"]:\n for url in [\"https://%s:8443/devmgr\" % system[\"controller_addresses\"][0],\n \"https://%s:443/devmgr\" % system[\"controller_addresses\"][0],\n \"http://%s:8080/devmgr\" % system[\"controller_addresses\"][0]]:\n try:\n rc, response = self._request(\"%s/utils/login?uid=admin&xsrf=false&onlycheck=true\" % url, ignore_errors=True, url_username=\"admin\",\n url_password=\"\", validate_certs=False)\n\n if rc == 200: # successful login without password\n system[\"password_set\"] = False\n if system[\"password\"]:\n try:\n rc, storage_system = self._request(\"%s/v2/storage-systems/1/passwords\" % url, method=\"POST\", url_username=\"admin\",\n headers=self.DEFAULT_HEADERS, url_password=\"\", validate_certs=False,\n data=json.dumps({\"currentAdminPassword\": \"\", \"adminPassword\": True,\n \"newPassword\": system[\"password\"]}))\n\n except Exception as error:\n system[\"failed\"] = True\n self.module.warn(\"Failed to set storage system password. Array [%s].\" % system[\"ssid\"])\n break\n\n elif rc == 401: # unauthorized\n system[\"password_set\"] = True\n break\n except Exception as error:\n pass\n else:\n self.module.warn(\"Failed to retrieve array password state. Array [%s].\" % system[\"ssid\"])\n system[\"failed\"] = True", "def enable_auth_backend():\n headers = {\"X-Vault-Token\": args.x_vault_token}\n data = {\"type\": \"kubernetes\"}\n url = \"{0}/sys/auth/{1}\".format(args.vault_url, args.k8s_cluster_name)\n print 'Enabling auth backend of type kubernetes for {0}'.format(args.k8s_cluster_name)\n req = send_post(url=url, data=data, headers=headers, return_output=True)\n if str(req.status_code).startswith('2'):\n print 'SUCCESS! 
{0} {1} {2}'.format(req.status_code, req.reason, req.content)\n else:\n if 'path is already in use' in req.content:\n print 'NOTE: Auth backend already enabled, which means the cluster is already setup on Vault.'\n print 'NOTE: Moving forward to Role creation, which is namespace-based'\n else:\n print 'FAIL! {0} {1} {2}'.format(req.status_code, req.reason, req.content)\n exit(77)", "def cmd_enable(self, app_name=None):\n rc = self.socket_command_with_project('enable', app_name)\n return rc", "def test_enable_virt_realm_remote_access(self):\n pass", "def pg_allow_replication(self, user, password, ip_ranges, restart=True):\n\n # XXX: does not support differing primary/replica pg versions\n self.create_db_user(user, password, replication=True)\n files.uncomment(self.pg_hba, \"local +replication\", use_sudo=True)\n for ip_range in ip_ranges:\n hostssl_line = (\n f\"hostssl replication all {ip_range} {self.pg_pw_encryption}\"\n )\n files.append(self.pg_hba, hostssl_line, use_sudo=True)\n if restart:\n sudo(\"service postgresql restart\")", "def auth_with_guest_security_db(self, sUserName, sUserPassword, nFlags = 0):\n\t\treturn Job(SDK.PrlVm_AuthWithGuestSecurityDb(self.handle, sUserName, sUserPassword, nFlags)[0])", "def enable():\n ret = {}\n result = __salt__[\"cmd.run_all\"](\n \"pfctl -e\", output_loglevel=\"trace\", python_shell=False\n )\n\n if result[\"retcode\"] == 0:\n ret = {\"comment\": \"pf enabled\", \"changes\": True}\n else:\n # If pf was already enabled the return code is also non-zero.\n # Don't raise an exception in that case.\n if result[\"stderr\"] == \"pfctl: pf already enabled\":\n ret = {\"comment\": \"pf already enabled\", \"changes\": False}\n else:\n raise CommandExecutionError(\n \"Could not enable pf\",\n info={\"errors\": [result[\"stderr\"]], \"changes\": False},\n )\n\n return ret", "def setup_volume_access( user_email, volume_name, caps, RG_port, slice_secret, RG_closure=None ):\n client = connect_syndicate()\n \n try:\n rc = ensure_volume_access_right_exists( user_email, volume_name, caps )\n assert rc is True, \"Failed to create access right for %s in %s\" % (user_email, volume_name)\n \n except Exception, e:\n logger.exception(e)\n return False\n \n RG_name = syndicate_provisioning.make_gateway_name( \"OpenCloud\", \"RG\", volume_name, \"localhost\" )\n RG_key_password = syndicate_provisioning.make_gateway_private_key_password( RG_name, slice_secret )\n \n try:\n rc = syndicate_provisioning.ensure_RG_exists( client, user_email, volume_name, RG_name, \"localhost\", RG_port, RG_key_password, closure=RG_closure )\n except Exception, e:\n logger.exception(e)\n return False\n \n return True", "def enable():\n configdb = ConfigDBConnector()\n configdb.connect()\n fc_info = {}\n fc_info['FLEX_COUNTER_STATUS'] = 'enable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"QUEUE_WATERMARK\", fc_info)\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"PG_WATERMARK\", fc_info)\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", BUFFER_POOL_WATERMARK, fc_info)", "def _update_celery_ldap_settings(self, node_roles):\n env.pstat_settings['enable_celery_ldap'] = False\n env.enable_celery_ldap = False\n if 'has_ldap_access' in node_roles:\n logger.info(\"Configuring node to run celery_ldap\")\n env.pstat_settings['enable_celery_ldap'] = True\n env.enable_celery_ldap = True\n return\n\n logger.info(\"Node not configured to run celery_ldap\")", "def test_db_ssl_enable(self):\n\n # Check default state is SSL on\n with mock.patch.dict('os.environ', REQUIRED_SETTINGS, clear=True):\n settings_vars 
= self.reload_settings()\n self.assertEqual(\n settings_vars['DATABASES']['default']['OPTIONS'],\n {'sslmode': 'require'}\n )\n\n # Check enabling the setting explicitly\n with mock.patch.dict('os.environ', {\n **REQUIRED_SETTINGS,\n 'MICROMASTERS_DB_DISABLE_SSL': 'True'\n }, clear=True):\n settings_vars = self.reload_settings()\n self.assertEqual(\n settings_vars['DATABASES']['default']['OPTIONS'],\n {}\n )\n\n # Disable it\n with mock.patch.dict('os.environ', {\n **REQUIRED_SETTINGS,\n 'MICROMASTERS_DB_DISABLE_SSL': 'False'\n }, clear=True):\n settings_vars = self.reload_settings()\n self.assertEqual(\n settings_vars['DATABASES']['default']['OPTIONS'],\n {'sslmode': 'require'}\n )", "def correct_password(username, password, db):\n\tquery = db((db.User.username == username) & (db.User.password == password))\n\treturn query.count() > 0", "def electrolytedb():\n if not check_for_mongodb():\n pytest.skip(\"MongoDB is required\")", "def disable_user(conn, user_dn):\n try:\n conn.modify(str(user_dn), {'userAccountControl': [(MODIFY_REPLACE, ['514'])]})\n except Exception as e:\n raise Exception(\"Can't disable the user :: {}\".format(e))", "def reconfigure_keystone_to_use_ldap(self):\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"basic_env_for_reconfiguration\")\n cluster_id = self.fuel_web.get_last_created_cluster()\n devops_pr_controller = self.fuel_web.get_nailgun_primary_node(\n self.env.d_env.nodes().slaves[0])\n\n pr_controller = self.fuel_web.get_nailgun_node_by_devops_node(\n devops_pr_controller)\n\n self.show_step(2)\n config = utils.get_config_template('keystone_ldap')\n structured_config = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(\n config,\n cluster_id,\n role=\"controller\")\n\n self.show_step(3)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role=\"controller\")\n\n self.show_step(4)\n self.fuel_web.task_wait(task, timeout=3600, interval=30)\n\n self.show_step(5)\n self.check_config_on_remote([pr_controller], structured_config)\n logger.info(\"New configuration was applied\")\n\n self.env.make_snapshot(\"reconfigure_keystone_to_use_ldap\")", "def enable():\n request = dict(id='gbn')\n _gbn_enable(request)", "def disable_auth(self):\n\n self._api_manager.disable_auth()", "def setLoopback(self, enable): \n if enable == True:\n DPxEnableDoutDinLoopback()\n else:\n DPxDisableDoutDinLoopback()", "def create_mongo_user():\n mongo_url = \"mongodb://mongo:mongo@code7-mongo/admin\"\n database = \"code7\"\n username = \"code7\"\n password = \"code7\"\n\n client = pymongo.MongoClient(mongo_url)\n mongo_db = pymongo.database.Database(client, database)\n\n mongo_db.add_user(\n username,\n password=password,\n **{\"roles\": [{\"role\": \"readWrite\", \"db\": database}, {\"role\": \"dbAdmin\", \"db\": database}]}\n )", "def getclient(hosts,replicaset,db_name,db_user,db_pwd):\n conn = MongoClient(hosts,replicaSet=replicaset,read_preference=ReadPreference.SECONDARY_PREFERRED)\n conn[db_name].authenticate(db_user,db_pwd,mechanism='SCRAM-SHA-1')\n return conn", "def do_security_setup(run_as_user, branch, base_path, dist_path, enable=True):\n \n if not enable:\n #disable security setup if enabled\n runcmd(\"apt-get -y remove unattended-upgrades fail2ban psad rkhunter chkrootkit logwatch apparmor auditd iwatch\")\n return\n \n #modify host.conf\n modify_config(r'^nospoof on$', 'nospoof on', '/etc/host.conf')\n \n #enable automatic security updates\n runcmd(\"apt-get -y install unattended-upgrades\")\n runcmd('''bash -c \"echo 
-e 'APT::Periodic::Update-Package-Lists \"1\";\\nAPT::Periodic::Unattended-Upgrade \"1\";' > /etc/apt/apt.conf.d/20auto-upgrades\" ''')\n runcmd(\"dpkg-reconfigure -fnoninteractive -plow unattended-upgrades\")\n \n #sysctl\n runcmd(\"install -m 0644 -o root -g root -D %s/linux/other/sysctl_rules.conf /etc/sysctl.d/60-tweaks.conf\" % dist_path)\n\n #set up fail2ban\n runcmd(\"apt-get -y install fail2ban\")\n runcmd(\"install -m 0644 -o root -g root -D %s/linux/other/fail2ban.jail.conf /etc/fail2ban/jail.d/counterblock.conf\" % dist_path)\n runcmd(\"service fail2ban restart\")\n \n #set up psad\n runcmd(\"apt-get -y install psad\")\n modify_config(r'^ENABLE_AUTO_IDS\\s+?N;$', 'ENABLE_AUTO_IDS\\tY;', '/etc/psad/psad.conf')\n modify_config(r'^ENABLE_AUTO_IDS_EMAILS\\s+?Y;$', 'ENABLE_AUTO_IDS_EMAILS\\tN;', '/etc/psad/psad.conf')\n for f in ['/etc/ufw/before.rules', '/etc/ufw/before6.rules']:\n modify_config(r'^# End required lines.*?# allow all on loopback$',\n '# End required lines\\n\\n#CUSTOM: for psad\\n-A INPUT -j LOG\\n-A FORWARD -j LOG\\n\\n# allow all on loopback',\n f, dotall=True)\n runcmd(\"psad -R && psad --sig-update\")\n runcmd(\"service ufw restart\")\n runcmd(\"service psad restart\")\n \n #set up chkrootkit, rkhunter\n runcmd(\"apt-get -y install rkhunter chkrootkit\")\n runcmd('bash -c \"rkhunter --update; exit 0\"')\n runcmd(\"rkhunter --propupd\")\n runcmd('bash -c \"rkhunter --check --sk; exit 0\"')\n runcmd(\"rkhunter --propupd\")\n \n #logwatch\n runcmd(\"apt-get -y install logwatch libdate-manip-perl\")\n \n #apparmor\n runcmd(\"apt-get -y install apparmor apparmor-profiles\")\n \n #auditd\n #note that auditd will need a reboot to fully apply the rules, due to it operating in \"immutable mode\" by default\n runcmd(\"apt-get -y install auditd audispd-plugins\")\n runcmd(\"install -m 0640 -o root -g root -D %s/linux/other/audit.rules /etc/audit/rules.d/counterblock.rules\" % dist_path)\n modify_config(r'^USE_AUGENRULES=.*?$', 'USE_AUGENRULES=\"yes\"', '/etc/default/auditd')\n runcmd(\"service auditd restart\")\n\n #iwatch\n runcmd(\"apt-get -y install iwatch\")\n modify_config(r'^START_DAEMON=.*?$', 'START_DAEMON=true', '/etc/default/iwatch')\n runcmd(\"install -m 0644 -o root -g root -D %s/linux/other/iwatch.xml /etc/iwatch/iwatch.xml\" % dist_path)\n modify_config(r'guard email=\"root@localhost\"', 'guard email=\"noreply@%s\"' % socket.gethostname(), '/etc/iwatch/iwatch.xml')\n runcmd(\"service iwatch restart\")", "def unconfigure_aaa_auth_proxy(device, server_grp):\n try:\n device.configure([\n f\"no aaa authorization auth-proxy default group {server_grp}\"\n ])\n except SubCommandFailure:\n raise SubCommandFailure(\n 'Could not unconfigure AAA auth proxy'\n )", "def lockdown_procedure():\n\tprint(\"----------\")\n\tprint_section_header(\"LOCKDOWN\", Fore.BLUE)\n\tprint_confirmation(\"Set secure configuration without user interaction.\")\n\n\t# Get sudo priv\n\tsp.run(\"sudo -E -v\", shell=True, stdout=sp.PIPE)\n\n\t####\n\t# FIREWALL\n\t####\n\n\tsp.run(['sudo', 'launchctl', 'load', '/System/Library/LaunchDaemons/com.apple.alf.agent.plist'], stdout=sp.PIPE)\n\tsp.run(['sudo', 'launchctl', 'load', '/System/Library/LaunchAgents/com.apple.alf.useragent.plist'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setglobalstate', 'on'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setloggingmode', 'on'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', 
'--setstealthmode', 'on'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsigned', 'off'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsignedapp', 'off'], stdout=sp.PIPE)\n\tsp.run(['sudo', 'pkill', '-HUP', 'socketfilterfw'], stdout=sp.PIPE)\n\n\t####\n\t# SYSTEM PROTECTION\n\t####\n\n\tsp.run('sudo spctl --master-enable', shell=True, stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsigned', 'off'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsignedapp', 'off'], stdout=sp.PIPE)\n\tsp.run(['sudo defaults write /Library/Preferences/SystemConfiguration/com.apple.captive.control Active -bool false'], stdout=sp.PIPE)\n\n\t####\n\t# METADATA STORAGE\n\t####\n\n\tsp.run(['rm', '-rfv', '\"~/Library/LanguageModeling/*\"', '\"~/Library/Spelling/*\"', '\"~/Library/Suggestions/*\"'])\n\tsp.run(['rm', '-rfv', '\"~/Library/Application Support/Quick Look/*\"'], stdout=sp.PIPE)\n\tsp.run([':>~/Library/Preferences/com.apple.LaunchServices.QuarantineEventsV2'], shell=True, stdout=sp.PIPE)\n\n\t####\n\t# USER SAFETY\n\t####\n\n\tsp.run(['defaults', 'write', 'com.apple.screensaver', 'askForPassword', '-int', '1'], stdout=sp.PIPE)\n\tsp.run(['defaults', 'write', 'com.apple.screensaver', 'askForPasswordDelay', '-int', '0'], stdout=sp.PIPE)\n\tsp.run(['defaults', 'write', 'NSGlobalDomain', 'AppleShowAllExtensions', '-bool', 'true'], stdout=sp.PIPE)\n\tsp.run(['defaults', 'write', 'NSGlobalDomain', 'NSDocumentSaveNewDocumentsToCloud', '-bool', 'false'], stdout=sp.PIPE)\n\tsp.run(['defaults', 'write', 'com.apple.finder', 'AppleShowAllFiles', '-boolean', 'true'], shell=True, stdout=sp.PIPE)\n\tsp.run(['killAll', 'Finder'], stdout=sp.PIPE)\n\n\t####\n\t# RESTART\n\t####\n\n\tfinal_configuration()", "def fusion_api_enable_pool(self, body, uri, api=None, headers=None):\n return self.idpool.enable(body, uri, api, headers)", "def __init__(self, db_name):\n uri = re.sub('<username>', mongoUser, connection_uri)\n uri = re.sub('<password>', mongoPassword, uri)\n self.client = pymongo.MongoClient(uri)\n # Create your database\n self.db = self.client[db_name]", "def login_mongodb_cloud():\r\n\r\n try:\r\n config.read(config_file)\r\n user = config[\"mongodb_cloud\"][\"user\"]\r\n pw = config[\"mongodb_cloud\"][\"pw\"]\r\n print(f'Got user=***** pw=***** from {config_file}')\r\n except Exception as e:\r\n print(f'Error parsing {config_file}: {e}')\r\n\r\n client = pymongo.MongoClient(f'mongodb+srv://{user}:{pw}'\r\n '@cluster0-np6jb.gcp.mongodb.net/test'\r\n '?retryWrites=true')\r\n\r\n return client", "def give_permissions(self):\n self._activate()\n self.configure(state=\"enabled\")", "def enableOrDisableFeature(self, enable):\n\n validator = LogicalNvdimmValidator()\n\n scalable_pmem_config = ScalablePersistentMemoryConfig(self._restHelpers,\\\n validator, self._chif_lib)\n scalable_pmem_config.refresh()\n\n # pre-validation\n self._helpers.validateFeatureIsSupported(scalable_pmem_config)\n self._helpers.validateFunctionalityIsEnabled(scalable_pmem_config)\n\n if enable is False:\n # If user disables Scalable PMEM, revert any pending changes to\n # prevent data or configuration loss\n if self._rdmc.interactive:\n message = u\"Warning: disabling Scalable Persistent Memory will \"\\\n \"revert any pending configuration changes.\\n\"\n self._helpers.confirmChanges(message=message)\n self._restHelpers.revertSettings()\n\n 
patchAttributes = {\n \"FeatureEnabled\" : enable\n }\n _ = self._restHelpers.patchScalablePmemSettingAttributes(patchAttributes)\n\n sys.stdout.write(u\"\\nThe Scalable Persistent Memory feature has been \"\\\n \"set to: {}\\n\".format(\"Enabled\" if enable else \"Disabled\"))\n\n self._helpers.noticeRestartRequired(scalable_pmem_config)\n\n sys.stdout.write(\"\\n\\n\")", "def enable(ctx):\n\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = ENABLE\n ctx.obj.mod_entry(\"FLEX_COUNTER_TABLE\", PG_DROP, port_info)", "def disable_root_login():\n sudo('passwd --lock root')", "def set_management_https(enabled=True, deploy=False):\n\n if enabled is True:\n value = \"no\"\n elif enabled is False:\n value = \"yes\"\n else:\n raise CommandExecutionError(\n \"Invalid option provided for service enabled option.\"\n )\n\n ret = {}\n\n query = {\n \"type\": \"config\",\n \"action\": \"set\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/service\",\n \"element\": \"<disable-https>{}</disable-https>\".format(value),\n }\n\n ret.update(__proxy__[\"panos.call\"](query))\n\n if deploy is True:\n ret.update(commit())\n\n return ret", "def test_modify_znode(self):\n z = self.test_start_one_value()\n self.client.set(\"/services/db/1.1.1.1\",\n json.dumps({\"enabled\": \"0\"}))\n z.loop(2, timeout=self.TIMEOUT)\n self.conf.write.assert_called_with({\"1.1.1.1\": {\"enabled\": \"0\"}})", "def enable_root_user(self, instance):\n return instance.enable_root_user()", "def mongodb_init(cls, host=\"127.0.0.1\", port=27017, username=\"\", password=\"\", dbname=\"admin\"):\n if username and password:\n uri = \"mongodb://{username}:{password}@{host}:{port}/{dbname}\".format(username=quote_plus(username),\n password=quote_plus(password),\n host=quote_plus(host),\n port=port,\n dbname=dbname)\n else:\n uri = \"mongodb://{host}:{port}/{dbname}\".format(host=host, port=port, dbname=dbname)\n cls._mongo_client = motor.motor_asyncio.AsyncIOMotorClient(uri, connectTimeoutMS=5000, serverSelectionTimeoutMS=5000)\n #LoopRunTask.register(cls._check_connection, 2)\n SingleTask.call_later(cls._check_connection, 2) #模拟串行定时器,避免并发\n logger.info(\"create mongodb connection pool.\")", "def enable(self) -> None:", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):" ]
[ "0.5751121", "0.56195325", "0.5540108", "0.53521764", "0.5319493", "0.52190715", "0.5144249", "0.5120799", "0.5098368", "0.5063028", "0.5047192", "0.5035032", "0.49773857", "0.49550235", "0.49518517", "0.49454325", "0.49319968", "0.49308294", "0.49165234", "0.48731655", "0.48696598", "0.48338133", "0.4833095", "0.48181367", "0.48150045", "0.481282", "0.4810283", "0.4806019", "0.47969133", "0.4792203", "0.47910753", "0.47761184", "0.4755472", "0.4754703", "0.47491637", "0.4748977", "0.47443524", "0.47347933", "0.4733614", "0.47276852", "0.47264874", "0.47050798", "0.46954778", "0.46929663", "0.46844122", "0.46782514", "0.46771535", "0.46688822", "0.46536502", "0.46477795", "0.4643554", "0.4635259", "0.46326712", "0.4632527", "0.462191", "0.461922", "0.46155158", "0.45949247", "0.45842597", "0.45828512", "0.45732972", "0.45719838", "0.45712572", "0.45677188", "0.45544493", "0.45516637", "0.4546666", "0.45417726", "0.45399833", "0.45389682", "0.4532951", "0.45289463", "0.45250478", "0.45176744", "0.45093423", "0.4494386", "0.44942436", "0.4492239", "0.4484793", "0.4479093", "0.44682428", "0.4467919", "0.44673017", "0.44579837", "0.44570845", "0.44486216", "0.44420943", "0.4441075", "0.44357204", "0.4435394", "0.44327477", "0.44322726", "0.44266033", "0.44257104", "0.44205645", "0.44183123", "0.4417359", "0.4417359", "0.4417359", "0.4417359", "0.4417359" ]
0.0
-1
Precautions: The instance must be in the Running state when you call this operation. If you call this operation to modify instance parameters and some of those parameters take effect only after an instance restart, the instance is automatically restarted after this operation is called. You can call the [DescribeParameterTemplates](~~67618~~) operation to query which parameters take effect only after the instance is restarted.
def modify_parameters_with_options( self, request: dds_20151201_models.ModifyParametersRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.ModifyParametersResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.character_type): query['CharacterType'] = request.character_type if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.node_id): query['NodeId'] = request.node_id if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.parameters): query['Parameters'] = request.parameters if not UtilClient.is_unset(request.region_id): query['RegionId'] = request.region_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='ModifyParameters', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.ModifyParametersResponse(), self.call_api(params, req, runtime) )
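A minimal synchronous usage sketch for the method above, assuming a dds `Client` has already been constructed as `client` with valid credentials and an appropriate endpoint; the instance ID, region, and parameter JSON string are illustrative placeholders, and the import paths follow the standard generated-SDK layout rather than anything shown in this row.

# Sketch: modify instance parameters synchronously with an explicit RuntimeOptions.
# `client` is assumed to be a configured dds Client; IDs and values are placeholders.
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_tea_util import models as util_models

request = dds_20151201_models.ModifyParametersRequest(
    dbinstance_id='dds-bpxxxxxxxxxxxxxx',   # placeholder instance ID
    region_id='cn-hangzhou',                # placeholder region
    # Parameters is assumed to be a JSON string of parameter name/value pairs.
    parameters='{"operationProfiling.slowOpThresholdMs": "200"}',
)
runtime = util_models.RuntimeOptions()
# If any modified parameter requires a restart, the instance is restarted automatically.
response = client.modify_parameters_with_options(request, runtime)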
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def runtime_updatable_params(self) -> pulumi.Output['outputs.RuntimeUpdatableParamsResponse']:\n return pulumi.get(self, \"runtime_updatable_params\")", "def set_parameters(self,params):\n K3Supervisor.set_parameters(self,params)\n self.gtg.set_parameters(self.parameters)\n self.avoidobstacles.set_parameters(self.parameters)\n self.wall.set_parameters(self.parameters)", "def get_resource_params(self):\n return SBE37Parameter.list()", "def _apply_params(self):\n config = self.get_startup_config()\n # Pass true to _set_params so we know these are startup values\n self._set_params(config, True)", "def gen_parameters(self):\n\n print \"\\t* Adding parameters to compute template\"\n # get all the server client\n servers = self.novaclient.servers.list()\n\n # add all key_pair_names\n self.gen_key_name_parameters(servers)\n\n # add all images\n self.gen_image_parameters(servers)\n\n # add all flavors\n self.gen_flavor_parameters(servers)\n\n # add all networks\n self.gen_network_parameters()", "def updateParameters(self):\n\n return", "def ApplyRuntimeParameters(self):\n \n if self.models is None or len(self.models) == 0:\n\n if self.verbose:\n\n print \"No model runtime parameters defined\"\n\n return\n\n num_models = len(self.models)\n\n if self.verbose:\n\n print \"Applying model runtime parameters to %d models\" % num_models\n\n for m in self.models:\n\n try:\n \n modelname = m['modelname']\n\n if self.verbose:\n\n print \"\\tSetting runtime parameters for '%s'\" % modelname\n\n\n self.SetModelName(modelname)\n \n if m.has_key('runtime_parameters') and not m['runtime_parameters'] is None:\n \n for parameter in m['runtime_parameters']:\n\n component_name = parameter[0]\n field = parameter[1]\n val = parameter[2]\n\n self.SetParameter(path=component_name, parameter=field, value=val)\n\n except Exception, e:\n\n print e\n\n continue\n\n # Now apply genericly set parameters\n\n if len(self._runtime_parameters) > 0:\n\n if self.verbose:\n\n print \"Applying generically set model runtime parameters\"\n\n \n for p in self._runtime_parameters:\n\n try:\n\n path = p['path'] \n parameter = p['parameter']\n value = p['value']\n service = None if not p.has_key('service') else p['service']\n\n self.SetParameter(path, parameter, value, service)\n \n except Exception, e:\n\n print e\n\n continue", "def modify_parameters(\n self,\n request: dds_20151201_models.ModifyParametersRequest,\n ) -> dds_20151201_models.ModifyParametersResponse:\n runtime = util_models.RuntimeOptions()\n return self.modify_parameters_with_options(request, runtime)", "def parameters():\n return render_template(\n 'parameters.html',\n title= \"Pi-Lapse\",\n year=datetime.now().year,\n )", "def set_parameters(self, params):\n self.kp = params.pgain", "def runtime_updatable_params(self) -> Optional[pulumi.Input['RuntimeUpdatableParamsArgs']]:\n return pulumi.get(self, \"runtime_updatable_params\")", "def get_resource_params(self):\n return Parameter.list()", "def get_resource_params(self):\n return Parameter.list()", "def _instantiate_parameter_states(self, context=None):\n\n from PsyNeuLink.Components.States.ParameterState import _instantiate_parameter_states\n _instantiate_parameter_states(owner=self, context=context)", "def _base_troposphere_template(self):\n template = troposphere.Template()\n template.add_parameter(\n troposphere.Parameter(\n \"Stage\",\n Default=\"dev\",\n Description=\"Name of the Stage\",\n Type=\"String\",\n )\n )\n\n template.add_parameter(\n troposphere.Parameter(\n \"Region\",\n Description=\"AWS 
Region\",\n Type=\"String\",\n )\n )\n return template", "def get_resource_params():\n return Parameter.list()", "def configure_stp_instance(self, instance, **kwargs):\n pass", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def potential_parameters(cls):\n raise NotImplementedError()", "def setup_parameters(self):\n structure = self.ctx.structure_initial_primitive\n ecutwfc = []\n ecutrho = []\n\n for kind in structure.get_kind_names():\n try:\n dual = self.ctx.protocol['pseudo_data'][kind]['dual']\n cutoff = self.ctx.protocol['pseudo_data'][kind]['cutoff']\n cutrho = dual * cutoff\n ecutwfc.append(cutoff)\n ecutrho.append(cutrho)\n except KeyError as exception:\n self.abort_nowait('failed to retrieve the cutoff or dual factor for {}'.format(kind))\n\n natoms = len(structure.sites)\n conv_thr = self.ctx.protocol['convergence_threshold'] * natoms\n\n self.ctx.inputs['parameters'] = {\n 'CONTROL': {\n 'restart_mode': 'from_scratch',\n 'tstress': self.ctx.protocol['tstress'],\n },\n 'SYSTEM': {\n 'ecutwfc': max(ecutwfc),\n 'ecutrho': max(ecutrho),\n 'smearing': self.ctx.protocol['smearing'],\n 'degauss': self.ctx.protocol['degauss'],\n 'occupations': self.ctx.protocol['occupations'],\n },\n 'ELECTRONS': {\n 'conv_thr': conv_thr,\n }\n }", "def Params(cls):\n return hyperparams.InstantiableParams(cls)", "def psfTemplateModel(n, params):\n psf_template = params[\"psf_template\"]\n self.m_psf = psf_template\n print(\"PSF template shape\", np.shape(psf_template))\n dim = int(n)\n m = np.shape(psf_template)[0]\n #if m != dim:\n # raise ValueError(\"PSF template dimension not equal patch size\")\n \n if np.sum(psf_template) != 1:\n print(\"Normalizing PSF template to sum = 1\")\n psf_template = psf_template/np.sum(psf_template) \n return psf_template", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n 
return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def pre_instance_ip_create(self, resource_dict):\n pass", "def ModifyInitialProperties(self):\n super().ModifyInitialProperties()\n for aux_process in self.project_parameters[\"processes\"][\"auxiliar_process_list\"]:\n if aux_process[\"python_module\"].GetString() == \"temporal_statistics_process\":\n aux_process[\"Parameters\"][\"statistics_start_point_control_value\"].SetDouble(self.project_parameters[\"problem_data\"][\"burnin_time\"].GetDouble())", "def set_parameters(self,params):\n K3Supervisor.set_parameters(self,params)\n self.blending.set_parameters(self.parameters)", "def updateParameters(self, parameters):\n\t\treturn", "def set_parameters(targeted_flag='true',\r\n tv_flag='false',\r\n hinge_flag='true',\r\n cos_flag='false',\r\n interpolation='bilinear',\r\n model_type='small',\r\n loss_type='center',\r\n dataset_type='vgg',\r\n attack='CW',\r\n norm='2',\r\n epsilon=0.1,\r\n iterations=100,\r\n binary_steps=8,\r\n learning_rate=0.01,\r\n epsilon_steps=0.01,\r\n init_const=0.3,\r\n mean_loss='embeddingmean',\r\n batch_size=-1,\r\n margin=5.0,\r\n amplification=2.0):\r\n params = {}\r\n\r\n params['model_type'] = model_type\r\n params['loss_type'] = loss_type\r\n params['dataset_type'] = dataset_type\r\n params['attack'] = attack\r\n params['norm'] = norm\r\n params['epsilon'] = epsilon\r\n params['iterations'] = iterations\r\n params['binary_steps'] = binary_steps\r\n params['learning_rate'] = learning_rate\r\n params['epsilon_steps'] = epsilon_steps\r\n params['init_const'] = init_const\r\n params['mean_loss'] = mean_loss\r\n params['batch_size'] = batch_size\r\n params['targeted_flag'] = string_to_bool(targeted_flag)\r\n params['tv_flag'] = string_to_bool(tv_flag)\r\n params['hinge_flag'] = string_to_bool(hinge_flag)\r\n params['cos_flag'] = string_to_bool(cos_flag)\r\n params['margin'] = margin\r\n params['amp'] = amplification\r\n\r\n if model_type == 'small' and loss_type == 'center':\r\n params['pixel_max'] = 1.0\r\n params['pixel_min'] = -1.0\r\n else:\r\n params['pixel_max'] = 1.0\r\n params['pixel_min'] = 0.0\r\n\r\n if (dataset_type == 'vggsmall'):\r\n params['align_dir'] = VGG_ALIGN_160_DIR\r\n params['test_dir'] = VGG_TEST_DIR\r\n elif model_type == 'large' or dataset_type == 'casia':\r\n params['align_dir'] = ALIGN_160_DIR\r\n elif model_type == 'small':\r\n params['align_dir'] = ALIGN_96_DIR\r\n else:\r\n ValueError('ValueError: Argument must be either \"small\" or \"large\".')\r\n \r\n if interpolation == 'nearest':\r\n params['interpolation'] = cv2.INTER_NEAREST\r\n elif interpolation == 'bilinear':\r\n params['interpolation'] = cv2.INTER_LINEAR\r\n elif interpolation == 'bicubic':\r\n params['interpolation'] = cv2.INTER_CUBIC\r\n elif interpolation == 'lanczos':\r\n params['interpolation'] = cv2.INTER_LANCZOS4\r\n elif interpolation == 'super':\r\n print('finish later')\r\n else:\r\n raise ValueError('ValueError: Argument must be of the following, [nearest, bilinear, bicubic, lanczos, super].')\r\n\r\n if params['hinge_flag']:\r\n params['attack_loss'] = 'hinge'\r\n else:\r\n params['attack_loss'] = 'target'\r\n if not params['targeted_flag']:\r\n params['attack_loss'] = 'target'\r\n if norm 
== 'inf':\r\n norm_name = 'i'\r\n else:\r\n norm_name = '2'\r\n if params['tv_flag']:\r\n tv_name = '_tv'\r\n else:\r\n tv_name = ''\r\n if params['cos_flag']:\r\n cos_name = '_cos'\r\n else:\r\n cos_name = ''\r\n\r\n params['model_name'] = '{}_{}'.format(model_type, loss_type)\r\n if dataset_type == 'casia' or dataset_type == 'vggsmall':\r\n params['model_name'] = dataset_type\r\n params['attack_name'] = '{}_l{}{}{}'.format(attack.lower(), norm_name, tv_name, cos_name)\r\n\r\n return params", "def updateParameters(self, parameters):", "def generate_template_dict(self):\n # Get the existing parameters\n params = super().generate_template_dict()\n\n # Add our custom parameters\n params['job_parameter_file'] = self.job_parameter_file\n params['job_output_directory'] = self.job_output_directory\n\n # Return the updated params\n return params", "def pause(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n if vmrun.pause() is None:\n puts_err(colored.red(\"Not paused\", vmrun))\n else:\n puts_err(colored.yellow(\"Paused\", vmrun))", "def parameters(self):\n return {\"W\": self.W,\n \"T\": self.T,\n \"P\": self.P}", "def pre_instance_ip_update(self, resource_id, resource_dict):\n pass", "def test_meta_template_parameters(test_vpc_layer):\n assert test_vpc_layer.user_params.get('template_parameter') is not None\n assert test_vpc_layer.meta['parameters']['dummy_parameter']['value'] ==\\\n test_vpc_layer.user_params['template_parameter']", "def SetVariationalParameters(self, data):\n self._SetParameters(data, 'SetVariationalParameters')", "def output_parameters(self):\n output_params = get_data_node(\n 'parameter',\n dict={\n 'stress': self.vasprun_obj.ionic_steps[-1]['stress'],\n 'efermi': self.vasprun_obj.efermi,\n 'energy': self.vasprun_obj.final_energy\n })\n return output_params", "def reset_parameters(self):\n\n for layer in self.layers:\n layer.reset_parameters()", "def rescale_all(self):\n for param_code in self.parameters.keys():\n self.rescale_parameter(param_code)", "def reset_parameters(self):\n self.embedding.reset_parameters()\n self.init_embedding()", "def params(self):\n return {'cfg': self.cfg,\n 'momentum': self.momentum,\n 'center': self.center,\n 'scale': self.scale,\n 'epsilon': self.epsilon,\n 'act_fn': self.act_fn}", "def parameters(self) -> Mapping[str, str]:\n return pulumi.get(self, \"parameters\")", "def edit_parameters(parameterized,with_apply=True,**params):\n if not with_apply:\n pf_class = ParametersFrame\n else:\n pf_class = ParametersFrameWithApply\n\n return pf_class(T.Toplevel(),parameterized,**params)", "def _pprint_params(self):\n return {'x_range': self.x_range, 'y_range': self.y_range,\n 'step': self.step, 'shape': self.shape,\n 'type': self.type}", "def set_parameters(self, **kwargs):\n self.__multi_layer_perceptron.set_params(**kwargs)", "def update_parameters(self):\n # We update gamma, gamma0, lambda and nu in turn (Bottolo et al, 2011)\n self.update_gamma()\n self.update_gamma0()\n self.update_lambda()\n self.update_nu()\n if self.sample_xi:\n self.update_xi()", "def ps(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, self.user, self.password)\n print(vmrun.listProcessesInGuest())", "def preprocess(self, instances, stats=None, **kwargs):\n pass", "def generative_parameters(self):\n params = nn.ParameterList()\n if 'parameters' in 
dir(self.generative_model):\n params.extend(list(self.generative_model.parameters()))\n params.extend(list(self.latent.generative_parameters()))\n return params", "def parameters(self):\n return {\"P\": self.P,\n \"T\": self.T}", "def provision(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, self.user, self.password)\n\n if not vmrun.installedTools():\n puts_err(colored.red(\"Tools not installed\"))\n return\n\n provisioned = 0\n for i, provision in enumerate(self.get('provision', [])):\n\n if provision.get('type') == 'file':\n source = provision.get('source')\n destination = provision.get('destination')\n if utils.provision_file(vmrun, source, destination) is None:\n puts_err(colored.red(\"Not Provisioned\"))\n return\n provisioned += 1\n\n elif provision.get('type') == 'shell':\n inline = provision.get('inline')\n path = provision.get('path')\n args = provision.get('args')\n if not isinstance(args, list):\n args = [args]\n if utils.provision_shell(vmrun, inline, path, args) is None:\n puts_err(colored.red(\"Not Provisioned\"))\n return\n provisioned += 1\n\n else:\n puts_err(colored.red(\"Not Provisioned ({}\".format(i)))\n return\n else:\n puts_err(colored.green(\"Provisioned {} entries\".format(provisioned)))\n return\n\n puts_err(colored.red(\"Not Provisioned ({}\".format(i)))", "def required_parameters(self):\n return ['seed', 'run_params']", "def generate_ideal(self):\n return StageParameters(self, *self._ideal_values())", "def update_params(self):\n pass", "def create_params(self):\n\n params = {'time_step':\n DesignParameter('time_step',\n unit='s',\n description='Time step with which the component model will be discretized'),\n 'horizon':\n DesignParameter('horizon',\n unit='s',\n description='Horizon of the optimization problem'),\n 'lines': DesignParameter('lines',\n unit='-',\n description='List of names of the lines that can be found in the network, e.g. 
'\n '\\'supply\\' and \\'return\\'',\n val=['supply', 'return'])\n }\n return params", "def do_overcloud_show_template_parameters(tuskar, args, outfile=sys.stdout):\n template_parameters = tuskar.overclouds.template_parameters()\n formatters = {\n '*': fmt.attributes_formatter\n }\n template_parameters_dict = template_parameters.to_dict()\n fmt.print_dict(template_parameters_dict, formatters, outfile=outfile)", "def hyperparameters(self):\n hyperparameters = super(TensorFlow, self).hyperparameters()\n\n self.checkpoint_path = self.checkpoint_path or self._default_s3_path('checkpoints')\n mpi_enabled = False\n\n if self._script_mode_enabled():\n additional_hyperparameters = {}\n\n if 'parameter_server' in self.distributions:\n ps_enabled = self.distributions['parameter_server'].get('enabled', False)\n additional_hyperparameters[self.LAUNCH_PS_ENV_NAME] = ps_enabled\n\n if 'mpi' in self.distributions:\n mpi_dict = self.distributions['mpi']\n mpi_enabled = mpi_dict.get('enabled', False)\n additional_hyperparameters[self.LAUNCH_MPI_ENV_NAME] = mpi_enabled\n additional_hyperparameters[self.MPI_NUM_PROCESSES_PER_HOST] = mpi_dict.get('processes_per_host', 1)\n additional_hyperparameters[self.MPI_CUSTOM_MPI_OPTIONS] = mpi_dict.get('custom_mpi_options', '')\n\n self.model_dir = self.model_dir or self._default_s3_path('model', mpi=mpi_enabled)\n additional_hyperparameters['model_dir'] = self.model_dir\n else:\n additional_hyperparameters = {'checkpoint_path': self.checkpoint_path,\n 'training_steps': self.training_steps,\n 'evaluation_steps': self.evaluation_steps,\n 'sagemaker_requirements': self.requirements_file}\n\n hyperparameters.update(Framework._json_encode_hyperparameters(additional_hyperparameters))\n return hyperparameters", "def set_params(self, params: Dict):\n\n if params['training_instances'] is not None:\n self.training_instances = params['training_instances']\n if params['n'] is not None:\n self.n = params['n']\n if params['lda'] is not None:\n self.lda = params['lda']\n if params['verbose'] is not None:\n self.verbose = params['verbose']\n\n self.num_features = self.training_instances[0].get_feature_count()\n self.w = None\n self.b = None", "def parameters(self) -> dict:\n return self._config.get('parameters', dict())", "def get_parameters(self):\r\n raise Exception(\"Not implemented (server-side parameter initialization)\")", "def postprocess_hyperparams(args, config):\n pass" ]
[ "0.52970576", "0.5158294", "0.5146255", "0.51394826", "0.5136844", "0.5133746", "0.51167595", "0.5108743", "0.5072519", "0.5060148", "0.5052976", "0.4969164", "0.4969164", "0.4955536", "0.49382296", "0.4936451", "0.4931274", "0.4930858", "0.4930858", "0.4930858", "0.4930858", "0.4930858", "0.4930858", "0.4930858", "0.4930858", "0.4902821", "0.4901451", "0.48979196", "0.48969564", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48810104", "0.4863457", "0.48596525", "0.48521855", "0.484082", "0.48268154", "0.4825639", "0.48138615", "0.47995675", "0.47967", "0.4791936", "0.4784865", "0.47844866", "0.4783893", "0.47715607", "0.47704443", "0.47591564", "0.47584447", "0.47549903", "0.4745988", "0.4745819", "0.47444648", "0.47430736", "0.47381255", "0.47339633", "0.47054031", "0.47042271", "0.47006083", "0.46900684", "0.46807006", "0.4680376", "0.4680303", "0.46797606", "0.46786225", "0.46774328", "0.46770334", "0.4676261" ]
0.53479296
0
Precautions: The instance must be in the Running state when you call this operation. If you call this operation to modify instance parameters and some of those parameters take effect only after an instance restart, the instance is automatically restarted after this operation is called. You can call the [DescribeParameterTemplates](~~67618~~) operation to query which parameters take effect only after the instance is restarted.
async def modify_parameters_with_options_async( self, request: dds_20151201_models.ModifyParametersRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.ModifyParametersResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.character_type): query['CharacterType'] = request.character_type if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.node_id): query['NodeId'] = request.node_id if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.parameters): query['Parameters'] = request.parameters if not UtilClient.is_unset(request.region_id): query['RegionId'] = request.region_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='ModifyParameters', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.ModifyParametersResponse(), await self.call_api_async(params, req, runtime) )
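The same request issued through the asynchronous variant, sketched under the assumption of an async-capable `client` and an event loop driven by `asyncio`; the helper function name and all values are illustrative placeholders.

# Sketch: the async counterpart, awaited inside a coroutine.
import asyncio

from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_tea_util import models as util_models


async def apply_parameter_change(client) -> None:
    # Hypothetical helper; values below are placeholders.
    request = dds_20151201_models.ModifyParametersRequest(
        dbinstance_id='dds-bpxxxxxxxxxxxxxx',  # placeholder instance ID
        region_id='cn-hangzhou',               # placeholder region
        parameters='{"net.maxIncomingConnections": "8000"}',  # assumed JSON format
    )
    runtime = util_models.RuntimeOptions()
    await client.modify_parameters_with_options_async(request, runtime)

# Typically driven with: asyncio.run(apply_parameter_change(client))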
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def modify_parameters_with_options(\n self,\n request: dds_20151201_models.ModifyParametersRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ModifyParametersResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.character_type):\n query['CharacterType'] = request.character_type\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.parameters):\n query['Parameters'] = request.parameters\n if not UtilClient.is_unset(request.region_id):\n query['RegionId'] = request.region_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ModifyParameters',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ModifyParametersResponse(),\n self.call_api(params, req, runtime)\n )", "def runtime_updatable_params(self) -> pulumi.Output['outputs.RuntimeUpdatableParamsResponse']:\n return pulumi.get(self, \"runtime_updatable_params\")", "def set_parameters(self,params):\n K3Supervisor.set_parameters(self,params)\n self.gtg.set_parameters(self.parameters)\n self.avoidobstacles.set_parameters(self.parameters)\n self.wall.set_parameters(self.parameters)", "def get_resource_params(self):\n return SBE37Parameter.list()", "def _apply_params(self):\n config = self.get_startup_config()\n # Pass true to _set_params so we know these are startup values\n self._set_params(config, True)", "def gen_parameters(self):\n\n print \"\\t* Adding parameters to compute template\"\n # get all the server client\n servers = self.novaclient.servers.list()\n\n # add all key_pair_names\n self.gen_key_name_parameters(servers)\n\n # add all images\n self.gen_image_parameters(servers)\n\n # add all flavors\n self.gen_flavor_parameters(servers)\n\n # add all networks\n self.gen_network_parameters()", "def updateParameters(self):\n\n return", "def ApplyRuntimeParameters(self):\n \n if self.models is None or len(self.models) == 0:\n\n if self.verbose:\n\n print \"No model runtime parameters defined\"\n\n return\n\n num_models = len(self.models)\n\n if self.verbose:\n\n print \"Applying model runtime parameters to %d models\" % num_models\n\n for m in self.models:\n\n try:\n \n modelname = m['modelname']\n\n if self.verbose:\n\n print \"\\tSetting runtime parameters for '%s'\" % modelname\n\n\n self.SetModelName(modelname)\n \n if m.has_key('runtime_parameters') and not m['runtime_parameters'] is None:\n \n for parameter in m['runtime_parameters']:\n\n component_name = parameter[0]\n field = parameter[1]\n val = parameter[2]\n\n self.SetParameter(path=component_name, parameter=field, value=val)\n\n except Exception, e:\n\n print e\n\n continue\n\n # Now apply 
genericly set parameters\n\n if len(self._runtime_parameters) > 0:\n\n if self.verbose:\n\n print \"Applying generically set model runtime parameters\"\n\n \n for p in self._runtime_parameters:\n\n try:\n\n path = p['path'] \n parameter = p['parameter']\n value = p['value']\n service = None if not p.has_key('service') else p['service']\n\n self.SetParameter(path, parameter, value, service)\n \n except Exception, e:\n\n print e\n\n continue", "def modify_parameters(\n self,\n request: dds_20151201_models.ModifyParametersRequest,\n ) -> dds_20151201_models.ModifyParametersResponse:\n runtime = util_models.RuntimeOptions()\n return self.modify_parameters_with_options(request, runtime)", "def parameters():\n return render_template(\n 'parameters.html',\n title= \"Pi-Lapse\",\n year=datetime.now().year,\n )", "def set_parameters(self, params):\n self.kp = params.pgain", "def runtime_updatable_params(self) -> Optional[pulumi.Input['RuntimeUpdatableParamsArgs']]:\n return pulumi.get(self, \"runtime_updatable_params\")", "def get_resource_params(self):\n return Parameter.list()", "def get_resource_params(self):\n return Parameter.list()", "def _instantiate_parameter_states(self, context=None):\n\n from PsyNeuLink.Components.States.ParameterState import _instantiate_parameter_states\n _instantiate_parameter_states(owner=self, context=context)", "def get_resource_params():\n return Parameter.list()", "def _base_troposphere_template(self):\n template = troposphere.Template()\n template.add_parameter(\n troposphere.Parameter(\n \"Stage\",\n Default=\"dev\",\n Description=\"Name of the Stage\",\n Type=\"String\",\n )\n )\n\n template.add_parameter(\n troposphere.Parameter(\n \"Region\",\n Description=\"AWS Region\",\n Type=\"String\",\n )\n )\n return template", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def configure_stp_instance(self, instance, **kwargs):\n pass", "def potential_parameters(cls):\n raise NotImplementedError()", "def setup_parameters(self):\n structure = self.ctx.structure_initial_primitive\n ecutwfc = []\n ecutrho = []\n\n for kind in structure.get_kind_names():\n try:\n dual = self.ctx.protocol['pseudo_data'][kind]['dual']\n cutoff = self.ctx.protocol['pseudo_data'][kind]['cutoff']\n cutrho = dual * cutoff\n ecutwfc.append(cutoff)\n ecutrho.append(cutrho)\n except KeyError as exception:\n self.abort_nowait('failed to retrieve the cutoff or dual factor for {}'.format(kind))\n\n natoms = len(structure.sites)\n conv_thr = self.ctx.protocol['convergence_threshold'] * natoms\n\n self.ctx.inputs['parameters'] = {\n 'CONTROL': {\n 'restart_mode': 'from_scratch',\n 'tstress': self.ctx.protocol['tstress'],\n },\n 'SYSTEM': {\n 'ecutwfc': max(ecutwfc),\n 'ecutrho': max(ecutrho),\n 'smearing': self.ctx.protocol['smearing'],\n 'degauss': self.ctx.protocol['degauss'],\n 'occupations': self.ctx.protocol['occupations'],\n },\n 'ELECTRONS': {\n 'conv_thr': conv_thr,\n }\n }", "def Params(cls):\n return hyperparams.InstantiableParams(cls)", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def 
updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def psfTemplateModel(n, params):\n psf_template = params[\"psf_template\"]\n self.m_psf = psf_template\n print(\"PSF template shape\", np.shape(psf_template))\n dim = int(n)\n m = np.shape(psf_template)[0]\n #if m != dim:\n # raise ValueError(\"PSF template dimension not equal patch size\")\n \n if np.sum(psf_template) != 1:\n print(\"Normalizing PSF template to sum = 1\")\n psf_template = psf_template/np.sum(psf_template) \n return psf_template", "def pre_instance_ip_create(self, resource_dict):\n pass", "def ModifyInitialProperties(self):\n super().ModifyInitialProperties()\n for aux_process in self.project_parameters[\"processes\"][\"auxiliar_process_list\"]:\n if aux_process[\"python_module\"].GetString() == \"temporal_statistics_process\":\n aux_process[\"Parameters\"][\"statistics_start_point_control_value\"].SetDouble(self.project_parameters[\"problem_data\"][\"burnin_time\"].GetDouble())", "def set_parameters(self,params):\n K3Supervisor.set_parameters(self,params)\n self.blending.set_parameters(self.parameters)", "def updateParameters(self, parameters):\n\t\treturn", "def set_parameters(targeted_flag='true',\r\n tv_flag='false',\r\n hinge_flag='true',\r\n cos_flag='false',\r\n interpolation='bilinear',\r\n model_type='small',\r\n loss_type='center',\r\n dataset_type='vgg',\r\n attack='CW',\r\n norm='2',\r\n epsilon=0.1,\r\n iterations=100,\r\n binary_steps=8,\r\n learning_rate=0.01,\r\n epsilon_steps=0.01,\r\n init_const=0.3,\r\n mean_loss='embeddingmean',\r\n batch_size=-1,\r\n margin=5.0,\r\n amplification=2.0):\r\n params = {}\r\n\r\n params['model_type'] = model_type\r\n params['loss_type'] = loss_type\r\n params['dataset_type'] = dataset_type\r\n params['attack'] = attack\r\n params['norm'] = norm\r\n params['epsilon'] = epsilon\r\n params['iterations'] = iterations\r\n params['binary_steps'] = binary_steps\r\n params['learning_rate'] = learning_rate\r\n params['epsilon_steps'] = epsilon_steps\r\n 
params['init_const'] = init_const\r\n params['mean_loss'] = mean_loss\r\n params['batch_size'] = batch_size\r\n params['targeted_flag'] = string_to_bool(targeted_flag)\r\n params['tv_flag'] = string_to_bool(tv_flag)\r\n params['hinge_flag'] = string_to_bool(hinge_flag)\r\n params['cos_flag'] = string_to_bool(cos_flag)\r\n params['margin'] = margin\r\n params['amp'] = amplification\r\n\r\n if model_type == 'small' and loss_type == 'center':\r\n params['pixel_max'] = 1.0\r\n params['pixel_min'] = -1.0\r\n else:\r\n params['pixel_max'] = 1.0\r\n params['pixel_min'] = 0.0\r\n\r\n if (dataset_type == 'vggsmall'):\r\n params['align_dir'] = VGG_ALIGN_160_DIR\r\n params['test_dir'] = VGG_TEST_DIR\r\n elif model_type == 'large' or dataset_type == 'casia':\r\n params['align_dir'] = ALIGN_160_DIR\r\n elif model_type == 'small':\r\n params['align_dir'] = ALIGN_96_DIR\r\n else:\r\n ValueError('ValueError: Argument must be either \"small\" or \"large\".')\r\n \r\n if interpolation == 'nearest':\r\n params['interpolation'] = cv2.INTER_NEAREST\r\n elif interpolation == 'bilinear':\r\n params['interpolation'] = cv2.INTER_LINEAR\r\n elif interpolation == 'bicubic':\r\n params['interpolation'] = cv2.INTER_CUBIC\r\n elif interpolation == 'lanczos':\r\n params['interpolation'] = cv2.INTER_LANCZOS4\r\n elif interpolation == 'super':\r\n print('finish later')\r\n else:\r\n raise ValueError('ValueError: Argument must be of the following, [nearest, bilinear, bicubic, lanczos, super].')\r\n\r\n if params['hinge_flag']:\r\n params['attack_loss'] = 'hinge'\r\n else:\r\n params['attack_loss'] = 'target'\r\n if not params['targeted_flag']:\r\n params['attack_loss'] = 'target'\r\n if norm == 'inf':\r\n norm_name = 'i'\r\n else:\r\n norm_name = '2'\r\n if params['tv_flag']:\r\n tv_name = '_tv'\r\n else:\r\n tv_name = ''\r\n if params['cos_flag']:\r\n cos_name = '_cos'\r\n else:\r\n cos_name = ''\r\n\r\n params['model_name'] = '{}_{}'.format(model_type, loss_type)\r\n if dataset_type == 'casia' or dataset_type == 'vggsmall':\r\n params['model_name'] = dataset_type\r\n params['attack_name'] = '{}_l{}{}{}'.format(attack.lower(), norm_name, tv_name, cos_name)\r\n\r\n return params", "def updateParameters(self, parameters):", "def generate_template_dict(self):\n # Get the existing parameters\n params = super().generate_template_dict()\n\n # Add our custom parameters\n params['job_parameter_file'] = self.job_parameter_file\n params['job_output_directory'] = self.job_output_directory\n\n # Return the updated params\n return params", "def pause(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n if vmrun.pause() is None:\n puts_err(colored.red(\"Not paused\", vmrun))\n else:\n puts_err(colored.yellow(\"Paused\", vmrun))", "def parameters(self):\n return {\"W\": self.W,\n \"T\": self.T,\n \"P\": self.P}", "def pre_instance_ip_update(self, resource_id, resource_dict):\n pass", "def test_meta_template_parameters(test_vpc_layer):\n assert test_vpc_layer.user_params.get('template_parameter') is not None\n assert test_vpc_layer.meta['parameters']['dummy_parameter']['value'] ==\\\n test_vpc_layer.user_params['template_parameter']", "def SetVariationalParameters(self, data):\n self._SetParameters(data, 'SetVariationalParameters')", "def output_parameters(self):\n output_params = get_data_node(\n 'parameter',\n dict={\n 'stress': self.vasprun_obj.ionic_steps[-1]['stress'],\n 'efermi': self.vasprun_obj.efermi,\n 
'energy': self.vasprun_obj.final_energy\n })\n return output_params", "def reset_parameters(self):\n\n for layer in self.layers:\n layer.reset_parameters()", "def rescale_all(self):\n for param_code in self.parameters.keys():\n self.rescale_parameter(param_code)", "def reset_parameters(self):\n self.embedding.reset_parameters()\n self.init_embedding()", "def params(self):\n return {'cfg': self.cfg,\n 'momentum': self.momentum,\n 'center': self.center,\n 'scale': self.scale,\n 'epsilon': self.epsilon,\n 'act_fn': self.act_fn}", "def parameters(self) -> Mapping[str, str]:\n return pulumi.get(self, \"parameters\")", "def edit_parameters(parameterized,with_apply=True,**params):\n if not with_apply:\n pf_class = ParametersFrame\n else:\n pf_class = ParametersFrameWithApply\n\n return pf_class(T.Toplevel(),parameterized,**params)", "def set_parameters(self, **kwargs):\n self.__multi_layer_perceptron.set_params(**kwargs)", "def _pprint_params(self):\n return {'x_range': self.x_range, 'y_range': self.y_range,\n 'step': self.step, 'shape': self.shape,\n 'type': self.type}", "def update_parameters(self):\n # We update gamma, gamma0, lambda and nu in turn (Bottolo et al, 2011)\n self.update_gamma()\n self.update_gamma0()\n self.update_lambda()\n self.update_nu()\n if self.sample_xi:\n self.update_xi()", "def ps(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, self.user, self.password)\n print(vmrun.listProcessesInGuest())", "def preprocess(self, instances, stats=None, **kwargs):\n pass", "def generative_parameters(self):\n params = nn.ParameterList()\n if 'parameters' in dir(self.generative_model):\n params.extend(list(self.generative_model.parameters()))\n params.extend(list(self.latent.generative_parameters()))\n return params", "def parameters(self):\n return {\"P\": self.P,\n \"T\": self.T}", "def provision(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, self.user, self.password)\n\n if not vmrun.installedTools():\n puts_err(colored.red(\"Tools not installed\"))\n return\n\n provisioned = 0\n for i, provision in enumerate(self.get('provision', [])):\n\n if provision.get('type') == 'file':\n source = provision.get('source')\n destination = provision.get('destination')\n if utils.provision_file(vmrun, source, destination) is None:\n puts_err(colored.red(\"Not Provisioned\"))\n return\n provisioned += 1\n\n elif provision.get('type') == 'shell':\n inline = provision.get('inline')\n path = provision.get('path')\n args = provision.get('args')\n if not isinstance(args, list):\n args = [args]\n if utils.provision_shell(vmrun, inline, path, args) is None:\n puts_err(colored.red(\"Not Provisioned\"))\n return\n provisioned += 1\n\n else:\n puts_err(colored.red(\"Not Provisioned ({}\".format(i)))\n return\n else:\n puts_err(colored.green(\"Provisioned {} entries\".format(provisioned)))\n return\n\n puts_err(colored.red(\"Not Provisioned ({}\".format(i)))", "def required_parameters(self):\n return ['seed', 'run_params']", "def generate_ideal(self):\n return StageParameters(self, *self._ideal_values())", "def update_params(self):\n pass", "def create_params(self):\n\n params = {'time_step':\n DesignParameter('time_step',\n unit='s',\n description='Time step with which the component model will be discretized'),\n 'horizon':\n DesignParameter('horizon',\n unit='s',\n description='Horizon of the optimization problem'),\n 'lines': 
DesignParameter('lines',\n unit='-',\n description='List of names of the lines that can be found in the network, e.g. '\n '\\'supply\\' and \\'return\\'',\n val=['supply', 'return'])\n }\n return params", "def hyperparameters(self):\n hyperparameters = super(TensorFlow, self).hyperparameters()\n\n self.checkpoint_path = self.checkpoint_path or self._default_s3_path('checkpoints')\n mpi_enabled = False\n\n if self._script_mode_enabled():\n additional_hyperparameters = {}\n\n if 'parameter_server' in self.distributions:\n ps_enabled = self.distributions['parameter_server'].get('enabled', False)\n additional_hyperparameters[self.LAUNCH_PS_ENV_NAME] = ps_enabled\n\n if 'mpi' in self.distributions:\n mpi_dict = self.distributions['mpi']\n mpi_enabled = mpi_dict.get('enabled', False)\n additional_hyperparameters[self.LAUNCH_MPI_ENV_NAME] = mpi_enabled\n additional_hyperparameters[self.MPI_NUM_PROCESSES_PER_HOST] = mpi_dict.get('processes_per_host', 1)\n additional_hyperparameters[self.MPI_CUSTOM_MPI_OPTIONS] = mpi_dict.get('custom_mpi_options', '')\n\n self.model_dir = self.model_dir or self._default_s3_path('model', mpi=mpi_enabled)\n additional_hyperparameters['model_dir'] = self.model_dir\n else:\n additional_hyperparameters = {'checkpoint_path': self.checkpoint_path,\n 'training_steps': self.training_steps,\n 'evaluation_steps': self.evaluation_steps,\n 'sagemaker_requirements': self.requirements_file}\n\n hyperparameters.update(Framework._json_encode_hyperparameters(additional_hyperparameters))\n return hyperparameters", "def set_params(self, params: Dict):\n\n if params['training_instances'] is not None:\n self.training_instances = params['training_instances']\n if params['n'] is not None:\n self.n = params['n']\n if params['lda'] is not None:\n self.lda = params['lda']\n if params['verbose'] is not None:\n self.verbose = params['verbose']\n\n self.num_features = self.training_instances[0].get_feature_count()\n self.w = None\n self.b = None", "def get_parameters(self):\r\n raise Exception(\"Not implemented (server-side parameter initialization)\")", "def parameters(self) -> dict:\n return self._config.get('parameters', dict())", "def do_overcloud_show_template_parameters(tuskar, args, outfile=sys.stdout):\n template_parameters = tuskar.overclouds.template_parameters()\n formatters = {\n '*': fmt.attributes_formatter\n }\n template_parameters_dict = template_parameters.to_dict()\n fmt.print_dict(template_parameters_dict, formatters, outfile=outfile)", "def postprocess_hyperparams(args, config):\n pass" ]
[ "0.53488946", "0.5298454", "0.51589996", "0.5146814", "0.5140348", "0.5136659", "0.5134305", "0.51175743", "0.5109007", "0.5072674", "0.5060626", "0.50541425", "0.49702355", "0.49702355", "0.49557152", "0.49375644", "0.49352777", "0.49315032", "0.49315032", "0.49315032", "0.49315032", "0.49315032", "0.49315032", "0.49315032", "0.49315032", "0.49309894", "0.49050742", "0.49018714", "0.4899097", "0.48976094", "0.48976094", "0.48976094", "0.48976094", "0.48976094", "0.48976094", "0.48976094", "0.48976094", "0.48976094", "0.48976094", "0.48976094", "0.48976094", "0.48976094", "0.48976094", "0.48976094", "0.48976094", "0.48976094", "0.48976094", "0.48976094", "0.48976094", "0.48976094", "0.48976094", "0.48976094", "0.48976094", "0.48976094", "0.48976094", "0.48976094", "0.48976094", "0.48976094", "0.48976094", "0.48976094", "0.48976094", "0.48976094", "0.48976094", "0.48940253", "0.48790234", "0.4862839", "0.48607847", "0.48527727", "0.4841604", "0.48275566", "0.48245302", "0.4814665", "0.48009306", "0.47957724", "0.47908837", "0.4786679", "0.47859386", "0.47840175", "0.47714576", "0.47709253", "0.4760399", "0.47596642", "0.47562405", "0.47472382", "0.4746792", "0.47445366", "0.47429925", "0.473655", "0.47345743", "0.47068393", "0.470357", "0.47019112", "0.46911702", "0.46817452", "0.46813616", "0.468015", "0.468", "0.46790326", "0.4678737", "0.4678618", "0.46771395" ]
0.0
-1
Precautions: The instance must be in the Running state when you call this operation. If you call this operation to modify instance parameters and some of those parameters take effect only after an instance restart, the instance is automatically restarted after this operation is called. You can call the [DescribeParameterTemplates](~~67618~~) operation to query which parameters take effect only after the instance is restarted.
def modify_parameters( self, request: dds_20151201_models.ModifyParametersRequest, ) -> dds_20151201_models.ModifyParametersResponse: runtime = util_models.RuntimeOptions() return self.modify_parameters_with_options(request, runtime)
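Because some parameters take effect only after a restart, a caller may want to consult the parameter templates before deciding when to apply a change. The sketch below assumes the generated `describe_parameter_templates` method and a `DescribeParameterTemplatesRequest` model with `engine`, `engine_version`, and `region_id` fields; these names and values are assumptions to verify against the generated models, not confirmed signatures.

# Sketch: look up which parameters force a restart before calling modify_parameters.
from alibabacloud_dds20151201 import models as dds_20151201_models

tpl_request = dds_20151201_models.DescribeParameterTemplatesRequest(
    engine='mongodb',          # assumed engine value
    engine_version='4.4',      # assumed engine version
    region_id='cn-hangzhou',   # placeholder region
)
tpl_response = client.describe_parameter_templates(tpl_request)
# Inspect the returned template records for parameters flagged as requiring a
# restart, then schedule client.modify_parameters(...) accordingly.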
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def modify_parameters_with_options(\n self,\n request: dds_20151201_models.ModifyParametersRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ModifyParametersResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.character_type):\n query['CharacterType'] = request.character_type\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.parameters):\n query['Parameters'] = request.parameters\n if not UtilClient.is_unset(request.region_id):\n query['RegionId'] = request.region_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ModifyParameters',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ModifyParametersResponse(),\n self.call_api(params, req, runtime)\n )", "def runtime_updatable_params(self) -> pulumi.Output['outputs.RuntimeUpdatableParamsResponse']:\n return pulumi.get(self, \"runtime_updatable_params\")", "def set_parameters(self,params):\n K3Supervisor.set_parameters(self,params)\n self.gtg.set_parameters(self.parameters)\n self.avoidobstacles.set_parameters(self.parameters)\n self.wall.set_parameters(self.parameters)", "def get_resource_params(self):\n return SBE37Parameter.list()", "def _apply_params(self):\n config = self.get_startup_config()\n # Pass true to _set_params so we know these are startup values\n self._set_params(config, True)", "def gen_parameters(self):\n\n print \"\\t* Adding parameters to compute template\"\n # get all the server client\n servers = self.novaclient.servers.list()\n\n # add all key_pair_names\n self.gen_key_name_parameters(servers)\n\n # add all images\n self.gen_image_parameters(servers)\n\n # add all flavors\n self.gen_flavor_parameters(servers)\n\n # add all networks\n self.gen_network_parameters()", "def updateParameters(self):\n\n return", "def ApplyRuntimeParameters(self):\n \n if self.models is None or len(self.models) == 0:\n\n if self.verbose:\n\n print \"No model runtime parameters defined\"\n\n return\n\n num_models = len(self.models)\n\n if self.verbose:\n\n print \"Applying model runtime parameters to %d models\" % num_models\n\n for m in self.models:\n\n try:\n \n modelname = m['modelname']\n\n if self.verbose:\n\n print \"\\tSetting runtime parameters for '%s'\" % modelname\n\n\n self.SetModelName(modelname)\n \n if m.has_key('runtime_parameters') and not m['runtime_parameters'] is None:\n \n for parameter in m['runtime_parameters']:\n\n component_name = parameter[0]\n field = parameter[1]\n val = parameter[2]\n\n self.SetParameter(path=component_name, parameter=field, value=val)\n\n except Exception, e:\n\n print e\n\n continue\n\n # Now apply 
genericly set parameters\n\n if len(self._runtime_parameters) > 0:\n\n if self.verbose:\n\n print \"Applying generically set model runtime parameters\"\n\n \n for p in self._runtime_parameters:\n\n try:\n\n path = p['path'] \n parameter = p['parameter']\n value = p['value']\n service = None if not p.has_key('service') else p['service']\n\n self.SetParameter(path, parameter, value, service)\n \n except Exception, e:\n\n print e\n\n continue", "def parameters():\n return render_template(\n 'parameters.html',\n title= \"Pi-Lapse\",\n year=datetime.now().year,\n )", "def set_parameters(self, params):\n self.kp = params.pgain", "def runtime_updatable_params(self) -> Optional[pulumi.Input['RuntimeUpdatableParamsArgs']]:\n return pulumi.get(self, \"runtime_updatable_params\")", "def get_resource_params(self):\n return Parameter.list()", "def get_resource_params(self):\n return Parameter.list()", "def _instantiate_parameter_states(self, context=None):\n\n from PsyNeuLink.Components.States.ParameterState import _instantiate_parameter_states\n _instantiate_parameter_states(owner=self, context=context)", "def _base_troposphere_template(self):\n template = troposphere.Template()\n template.add_parameter(\n troposphere.Parameter(\n \"Stage\",\n Default=\"dev\",\n Description=\"Name of the Stage\",\n Type=\"String\",\n )\n )\n\n template.add_parameter(\n troposphere.Parameter(\n \"Region\",\n Description=\"AWS Region\",\n Type=\"String\",\n )\n )\n return template", "def get_resource_params():\n return Parameter.list()", "def configure_stp_instance(self, instance, **kwargs):\n pass", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def potential_parameters(cls):\n raise NotImplementedError()", "def setup_parameters(self):\n structure = self.ctx.structure_initial_primitive\n ecutwfc = []\n ecutrho = []\n\n for kind in structure.get_kind_names():\n try:\n dual = self.ctx.protocol['pseudo_data'][kind]['dual']\n cutoff = self.ctx.protocol['pseudo_data'][kind]['cutoff']\n cutrho = dual * cutoff\n ecutwfc.append(cutoff)\n ecutrho.append(cutrho)\n except KeyError as exception:\n self.abort_nowait('failed to retrieve the cutoff or dual factor for {}'.format(kind))\n\n natoms = len(structure.sites)\n conv_thr = self.ctx.protocol['convergence_threshold'] * natoms\n\n self.ctx.inputs['parameters'] = {\n 'CONTROL': {\n 'restart_mode': 'from_scratch',\n 'tstress': self.ctx.protocol['tstress'],\n },\n 'SYSTEM': {\n 'ecutwfc': max(ecutwfc),\n 'ecutrho': max(ecutrho),\n 'smearing': self.ctx.protocol['smearing'],\n 'degauss': self.ctx.protocol['degauss'],\n 'occupations': self.ctx.protocol['occupations'],\n },\n 'ELECTRONS': {\n 'conv_thr': conv_thr,\n }\n }", "def Params(cls):\n return hyperparams.InstantiableParams(cls)", "def psfTemplateModel(n, params):\n psf_template = params[\"psf_template\"]\n self.m_psf = psf_template\n print(\"PSF template shape\", np.shape(psf_template))\n dim = int(n)\n m = np.shape(psf_template)[0]\n #if m != dim:\n # raise ValueError(\"PSF template dimension not equal patch size\")\n \n if np.sum(psf_template) != 1:\n print(\"Normalizing PSF template to sum = 1\")\n psf_template = psf_template/np.sum(psf_template) 
\n return psf_template", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def pre_instance_ip_create(self, resource_dict):\n pass", "def ModifyInitialProperties(self):\n super().ModifyInitialProperties()\n for aux_process in self.project_parameters[\"processes\"][\"auxiliar_process_list\"]:\n if aux_process[\"python_module\"].GetString() == \"temporal_statistics_process\":\n aux_process[\"Parameters\"][\"statistics_start_point_control_value\"].SetDouble(self.project_parameters[\"problem_data\"][\"burnin_time\"].GetDouble())", "def set_parameters(self,params):\n K3Supervisor.set_parameters(self,params)\n self.blending.set_parameters(self.parameters)", "def updateParameters(self, parameters):\n\t\treturn", "def set_parameters(targeted_flag='true',\r\n tv_flag='false',\r\n hinge_flag='true',\r\n cos_flag='false',\r\n interpolation='bilinear',\r\n model_type='small',\r\n loss_type='center',\r\n dataset_type='vgg',\r\n attack='CW',\r\n norm='2',\r\n epsilon=0.1,\r\n iterations=100,\r\n binary_steps=8,\r\n learning_rate=0.01,\r\n epsilon_steps=0.01,\r\n init_const=0.3,\r\n mean_loss='embeddingmean',\r\n batch_size=-1,\r\n margin=5.0,\r\n amplification=2.0):\r\n params = {}\r\n\r\n params['model_type'] = model_type\r\n params['loss_type'] = loss_type\r\n params['dataset_type'] = dataset_type\r\n params['attack'] = attack\r\n params['norm'] = norm\r\n params['epsilon'] = epsilon\r\n params['iterations'] = iterations\r\n params['binary_steps'] = binary_steps\r\n params['learning_rate'] = learning_rate\r\n params['epsilon_steps'] = epsilon_steps\r\n params['init_const'] = init_const\r\n params['mean_loss'] = mean_loss\r\n params['batch_size'] = batch_size\r\n params['targeted_flag'] = string_to_bool(targeted_flag)\r\n params['tv_flag'] = string_to_bool(tv_flag)\r\n params['hinge_flag'] = 
string_to_bool(hinge_flag)\r\n params['cos_flag'] = string_to_bool(cos_flag)\r\n params['margin'] = margin\r\n params['amp'] = amplification\r\n\r\n if model_type == 'small' and loss_type == 'center':\r\n params['pixel_max'] = 1.0\r\n params['pixel_min'] = -1.0\r\n else:\r\n params['pixel_max'] = 1.0\r\n params['pixel_min'] = 0.0\r\n\r\n if (dataset_type == 'vggsmall'):\r\n params['align_dir'] = VGG_ALIGN_160_DIR\r\n params['test_dir'] = VGG_TEST_DIR\r\n elif model_type == 'large' or dataset_type == 'casia':\r\n params['align_dir'] = ALIGN_160_DIR\r\n elif model_type == 'small':\r\n params['align_dir'] = ALIGN_96_DIR\r\n else:\r\n ValueError('ValueError: Argument must be either \"small\" or \"large\".')\r\n \r\n if interpolation == 'nearest':\r\n params['interpolation'] = cv2.INTER_NEAREST\r\n elif interpolation == 'bilinear':\r\n params['interpolation'] = cv2.INTER_LINEAR\r\n elif interpolation == 'bicubic':\r\n params['interpolation'] = cv2.INTER_CUBIC\r\n elif interpolation == 'lanczos':\r\n params['interpolation'] = cv2.INTER_LANCZOS4\r\n elif interpolation == 'super':\r\n print('finish later')\r\n else:\r\n raise ValueError('ValueError: Argument must be of the following, [nearest, bilinear, bicubic, lanczos, super].')\r\n\r\n if params['hinge_flag']:\r\n params['attack_loss'] = 'hinge'\r\n else:\r\n params['attack_loss'] = 'target'\r\n if not params['targeted_flag']:\r\n params['attack_loss'] = 'target'\r\n if norm == 'inf':\r\n norm_name = 'i'\r\n else:\r\n norm_name = '2'\r\n if params['tv_flag']:\r\n tv_name = '_tv'\r\n else:\r\n tv_name = ''\r\n if params['cos_flag']:\r\n cos_name = '_cos'\r\n else:\r\n cos_name = ''\r\n\r\n params['model_name'] = '{}_{}'.format(model_type, loss_type)\r\n if dataset_type == 'casia' or dataset_type == 'vggsmall':\r\n params['model_name'] = dataset_type\r\n params['attack_name'] = '{}_l{}{}{}'.format(attack.lower(), norm_name, tv_name, cos_name)\r\n\r\n return params", "def updateParameters(self, parameters):", "def generate_template_dict(self):\n # Get the existing parameters\n params = super().generate_template_dict()\n\n # Add our custom parameters\n params['job_parameter_file'] = self.job_parameter_file\n params['job_output_directory'] = self.job_output_directory\n\n # Return the updated params\n return params", "def pause(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n if vmrun.pause() is None:\n puts_err(colored.red(\"Not paused\", vmrun))\n else:\n puts_err(colored.yellow(\"Paused\", vmrun))", "def parameters(self):\n return {\"W\": self.W,\n \"T\": self.T,\n \"P\": self.P}", "def pre_instance_ip_update(self, resource_id, resource_dict):\n pass", "def test_meta_template_parameters(test_vpc_layer):\n assert test_vpc_layer.user_params.get('template_parameter') is not None\n assert test_vpc_layer.meta['parameters']['dummy_parameter']['value'] ==\\\n test_vpc_layer.user_params['template_parameter']", "def SetVariationalParameters(self, data):\n self._SetParameters(data, 'SetVariationalParameters')", "def output_parameters(self):\n output_params = get_data_node(\n 'parameter',\n dict={\n 'stress': self.vasprun_obj.ionic_steps[-1]['stress'],\n 'efermi': self.vasprun_obj.efermi,\n 'energy': self.vasprun_obj.final_energy\n })\n return output_params", "def reset_parameters(self):\n\n for layer in self.layers:\n layer.reset_parameters()", "def rescale_all(self):\n for param_code in self.parameters.keys():\n 
self.rescale_parameter(param_code)", "def reset_parameters(self):\n self.embedding.reset_parameters()\n self.init_embedding()", "def params(self):\n return {'cfg': self.cfg,\n 'momentum': self.momentum,\n 'center': self.center,\n 'scale': self.scale,\n 'epsilon': self.epsilon,\n 'act_fn': self.act_fn}", "def parameters(self) -> Mapping[str, str]:\n return pulumi.get(self, \"parameters\")", "def edit_parameters(parameterized,with_apply=True,**params):\n if not with_apply:\n pf_class = ParametersFrame\n else:\n pf_class = ParametersFrameWithApply\n\n return pf_class(T.Toplevel(),parameterized,**params)", "def _pprint_params(self):\n return {'x_range': self.x_range, 'y_range': self.y_range,\n 'step': self.step, 'shape': self.shape,\n 'type': self.type}", "def set_parameters(self, **kwargs):\n self.__multi_layer_perceptron.set_params(**kwargs)", "def update_parameters(self):\n # We update gamma, gamma0, lambda and nu in turn (Bottolo et al, 2011)\n self.update_gamma()\n self.update_gamma0()\n self.update_lambda()\n self.update_nu()\n if self.sample_xi:\n self.update_xi()", "def ps(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, self.user, self.password)\n print(vmrun.listProcessesInGuest())", "def preprocess(self, instances, stats=None, **kwargs):\n pass", "def generative_parameters(self):\n params = nn.ParameterList()\n if 'parameters' in dir(self.generative_model):\n params.extend(list(self.generative_model.parameters()))\n params.extend(list(self.latent.generative_parameters()))\n return params", "def parameters(self):\n return {\"P\": self.P,\n \"T\": self.T}", "def provision(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, self.user, self.password)\n\n if not vmrun.installedTools():\n puts_err(colored.red(\"Tools not installed\"))\n return\n\n provisioned = 0\n for i, provision in enumerate(self.get('provision', [])):\n\n if provision.get('type') == 'file':\n source = provision.get('source')\n destination = provision.get('destination')\n if utils.provision_file(vmrun, source, destination) is None:\n puts_err(colored.red(\"Not Provisioned\"))\n return\n provisioned += 1\n\n elif provision.get('type') == 'shell':\n inline = provision.get('inline')\n path = provision.get('path')\n args = provision.get('args')\n if not isinstance(args, list):\n args = [args]\n if utils.provision_shell(vmrun, inline, path, args) is None:\n puts_err(colored.red(\"Not Provisioned\"))\n return\n provisioned += 1\n\n else:\n puts_err(colored.red(\"Not Provisioned ({}\".format(i)))\n return\n else:\n puts_err(colored.green(\"Provisioned {} entries\".format(provisioned)))\n return\n\n puts_err(colored.red(\"Not Provisioned ({}\".format(i)))", "def required_parameters(self):\n return ['seed', 'run_params']", "def generate_ideal(self):\n return StageParameters(self, *self._ideal_values())", "def update_params(self):\n pass", "def create_params(self):\n\n params = {'time_step':\n DesignParameter('time_step',\n unit='s',\n description='Time step with which the component model will be discretized'),\n 'horizon':\n DesignParameter('horizon',\n unit='s',\n description='Horizon of the optimization problem'),\n 'lines': DesignParameter('lines',\n unit='-',\n description='List of names of the lines that can be found in the network, e.g. 
'\n '\\'supply\\' and \\'return\\'',\n val=['supply', 'return'])\n }\n return params", "def do_overcloud_show_template_parameters(tuskar, args, outfile=sys.stdout):\n template_parameters = tuskar.overclouds.template_parameters()\n formatters = {\n '*': fmt.attributes_formatter\n }\n template_parameters_dict = template_parameters.to_dict()\n fmt.print_dict(template_parameters_dict, formatters, outfile=outfile)", "def hyperparameters(self):\n hyperparameters = super(TensorFlow, self).hyperparameters()\n\n self.checkpoint_path = self.checkpoint_path or self._default_s3_path('checkpoints')\n mpi_enabled = False\n\n if self._script_mode_enabled():\n additional_hyperparameters = {}\n\n if 'parameter_server' in self.distributions:\n ps_enabled = self.distributions['parameter_server'].get('enabled', False)\n additional_hyperparameters[self.LAUNCH_PS_ENV_NAME] = ps_enabled\n\n if 'mpi' in self.distributions:\n mpi_dict = self.distributions['mpi']\n mpi_enabled = mpi_dict.get('enabled', False)\n additional_hyperparameters[self.LAUNCH_MPI_ENV_NAME] = mpi_enabled\n additional_hyperparameters[self.MPI_NUM_PROCESSES_PER_HOST] = mpi_dict.get('processes_per_host', 1)\n additional_hyperparameters[self.MPI_CUSTOM_MPI_OPTIONS] = mpi_dict.get('custom_mpi_options', '')\n\n self.model_dir = self.model_dir or self._default_s3_path('model', mpi=mpi_enabled)\n additional_hyperparameters['model_dir'] = self.model_dir\n else:\n additional_hyperparameters = {'checkpoint_path': self.checkpoint_path,\n 'training_steps': self.training_steps,\n 'evaluation_steps': self.evaluation_steps,\n 'sagemaker_requirements': self.requirements_file}\n\n hyperparameters.update(Framework._json_encode_hyperparameters(additional_hyperparameters))\n return hyperparameters", "def set_params(self, params: Dict):\n\n if params['training_instances'] is not None:\n self.training_instances = params['training_instances']\n if params['n'] is not None:\n self.n = params['n']\n if params['lda'] is not None:\n self.lda = params['lda']\n if params['verbose'] is not None:\n self.verbose = params['verbose']\n\n self.num_features = self.training_instances[0].get_feature_count()\n self.w = None\n self.b = None", "def parameters(self) -> dict:\n return self._config.get('parameters', dict())", "def get_parameters(self):\r\n raise Exception(\"Not implemented (server-side parameter initialization)\")", "def postprocess_hyperparams(args, config):\n pass" ]
[ "0.53479296", "0.52970576", "0.5158294", "0.5146255", "0.51394826", "0.5136844", "0.5133746", "0.51167595", "0.5072519", "0.5060148", "0.5052976", "0.4969164", "0.4969164", "0.4955536", "0.49382296", "0.4936451", "0.4931274", "0.4930858", "0.4930858", "0.4930858", "0.4930858", "0.4930858", "0.4930858", "0.4930858", "0.4930858", "0.4902821", "0.4901451", "0.48979196", "0.48969564", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48968825", "0.48810104", "0.4863457", "0.48596525", "0.48521855", "0.484082", "0.48268154", "0.4825639", "0.48138615", "0.47995675", "0.47967", "0.4791936", "0.4784865", "0.47844866", "0.4783893", "0.47715607", "0.47704443", "0.47591564", "0.47584447", "0.47549903", "0.4745988", "0.4745819", "0.47444648", "0.47430736", "0.47381255", "0.47339633", "0.47054031", "0.47042271", "0.47006083", "0.46900684", "0.46807006", "0.4680376", "0.4680303", "0.46797606", "0.46786225", "0.46774328", "0.46770334", "0.4676261" ]
0.5108743
8
Precautions: The instance must be in the Running state when you call this operation. If any of the parameters that you modify take effect only after an instance restart, the instance is automatically restarted when you call this operation. You can call the [DescribeParameterTemplates](~~67618~~) operation to query which parameters take effect only after an instance restart.
async def modify_parameters_async( self, request: dds_20151201_models.ModifyParametersRequest, ) -> dds_20151201_models.ModifyParametersResponse: runtime = util_models.RuntimeOptions() return await self.modify_parameters_with_options_async(request, runtime)
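A minimal usage sketch for the async wrapper above. It assumes the standard alibabacloud-* package layout (alibabacloud_dds20151201, alibabacloud_tea_openapi), which may differ in your environment; the endpoint, instance ID, and the operationProfiling.slowOpThresholdMs parameter value are placeholders for illustration only, and credentials are read from environment variables.

import asyncio
import os

from alibabacloud_dds20151201.client import Client
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_tea_openapi import models as open_api_models


async def main() -> None:
    # Credentials, region, and endpoint are placeholders; adjust to your account.
    config = open_api_models.Config(
        access_key_id=os.environ['ALIBABA_CLOUD_ACCESS_KEY_ID'],
        access_key_secret=os.environ['ALIBABA_CLOUD_ACCESS_KEY_SECRET'],
        region_id='cn-hangzhou',
    )
    config.endpoint = 'mongodb.aliyuncs.com'  # assumed public endpoint
    client = Client(config)

    # Parameters is a JSON string of parameter name/value pairs; this
    # parameter name and value are only illustrative.
    request = dds_20151201_models.ModifyParametersRequest(
        dbinstance_id='dds-bp1xxxxxxxxxxxxx',
        parameters='{"operationProfiling.slowOpThresholdMs":"200"}',
    )
    response = await client.modify_parameters_async(request)
    print(response.body)


if __name__ == '__main__':
    asyncio.run(main())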
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def modify_parameters_with_options(\n self,\n request: dds_20151201_models.ModifyParametersRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ModifyParametersResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.character_type):\n query['CharacterType'] = request.character_type\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.parameters):\n query['Parameters'] = request.parameters\n if not UtilClient.is_unset(request.region_id):\n query['RegionId'] = request.region_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ModifyParameters',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ModifyParametersResponse(),\n self.call_api(params, req, runtime)\n )", "def runtime_updatable_params(self) -> pulumi.Output['outputs.RuntimeUpdatableParamsResponse']:\n return pulumi.get(self, \"runtime_updatable_params\")", "def set_parameters(self,params):\n K3Supervisor.set_parameters(self,params)\n self.gtg.set_parameters(self.parameters)\n self.avoidobstacles.set_parameters(self.parameters)\n self.wall.set_parameters(self.parameters)", "def get_resource_params(self):\n return SBE37Parameter.list()", "def _apply_params(self):\n config = self.get_startup_config()\n # Pass true to _set_params so we know these are startup values\n self._set_params(config, True)", "def gen_parameters(self):\n\n print \"\\t* Adding parameters to compute template\"\n # get all the server client\n servers = self.novaclient.servers.list()\n\n # add all key_pair_names\n self.gen_key_name_parameters(servers)\n\n # add all images\n self.gen_image_parameters(servers)\n\n # add all flavors\n self.gen_flavor_parameters(servers)\n\n # add all networks\n self.gen_network_parameters()", "def updateParameters(self):\n\n return", "def ApplyRuntimeParameters(self):\n \n if self.models is None or len(self.models) == 0:\n\n if self.verbose:\n\n print \"No model runtime parameters defined\"\n\n return\n\n num_models = len(self.models)\n\n if self.verbose:\n\n print \"Applying model runtime parameters to %d models\" % num_models\n\n for m in self.models:\n\n try:\n \n modelname = m['modelname']\n\n if self.verbose:\n\n print \"\\tSetting runtime parameters for '%s'\" % modelname\n\n\n self.SetModelName(modelname)\n \n if m.has_key('runtime_parameters') and not m['runtime_parameters'] is None:\n \n for parameter in m['runtime_parameters']:\n\n component_name = parameter[0]\n field = parameter[1]\n val = parameter[2]\n\n self.SetParameter(path=component_name, parameter=field, value=val)\n\n except Exception, e:\n\n print e\n\n continue\n\n # Now apply 
genericly set parameters\n\n if len(self._runtime_parameters) > 0:\n\n if self.verbose:\n\n print \"Applying generically set model runtime parameters\"\n\n \n for p in self._runtime_parameters:\n\n try:\n\n path = p['path'] \n parameter = p['parameter']\n value = p['value']\n service = None if not p.has_key('service') else p['service']\n\n self.SetParameter(path, parameter, value, service)\n \n except Exception, e:\n\n print e\n\n continue", "def modify_parameters(\n self,\n request: dds_20151201_models.ModifyParametersRequest,\n ) -> dds_20151201_models.ModifyParametersResponse:\n runtime = util_models.RuntimeOptions()\n return self.modify_parameters_with_options(request, runtime)", "def parameters():\n return render_template(\n 'parameters.html',\n title= \"Pi-Lapse\",\n year=datetime.now().year,\n )", "def set_parameters(self, params):\n self.kp = params.pgain", "def runtime_updatable_params(self) -> Optional[pulumi.Input['RuntimeUpdatableParamsArgs']]:\n return pulumi.get(self, \"runtime_updatable_params\")", "def get_resource_params(self):\n return Parameter.list()", "def get_resource_params(self):\n return Parameter.list()", "def _instantiate_parameter_states(self, context=None):\n\n from PsyNeuLink.Components.States.ParameterState import _instantiate_parameter_states\n _instantiate_parameter_states(owner=self, context=context)", "def _base_troposphere_template(self):\n template = troposphere.Template()\n template.add_parameter(\n troposphere.Parameter(\n \"Stage\",\n Default=\"dev\",\n Description=\"Name of the Stage\",\n Type=\"String\",\n )\n )\n\n template.add_parameter(\n troposphere.Parameter(\n \"Region\",\n Description=\"AWS Region\",\n Type=\"String\",\n )\n )\n return template", "def get_resource_params():\n return Parameter.list()", "def configure_stp_instance(self, instance, **kwargs):\n pass", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def potential_parameters(cls):\n raise NotImplementedError()", "def setup_parameters(self):\n structure = self.ctx.structure_initial_primitive\n ecutwfc = []\n ecutrho = []\n\n for kind in structure.get_kind_names():\n try:\n dual = self.ctx.protocol['pseudo_data'][kind]['dual']\n cutoff = self.ctx.protocol['pseudo_data'][kind]['cutoff']\n cutrho = dual * cutoff\n ecutwfc.append(cutoff)\n ecutrho.append(cutrho)\n except KeyError as exception:\n self.abort_nowait('failed to retrieve the cutoff or dual factor for {}'.format(kind))\n\n natoms = len(structure.sites)\n conv_thr = self.ctx.protocol['convergence_threshold'] * natoms\n\n self.ctx.inputs['parameters'] = {\n 'CONTROL': {\n 'restart_mode': 'from_scratch',\n 'tstress': self.ctx.protocol['tstress'],\n },\n 'SYSTEM': {\n 'ecutwfc': max(ecutwfc),\n 'ecutrho': max(ecutrho),\n 'smearing': self.ctx.protocol['smearing'],\n 'degauss': self.ctx.protocol['degauss'],\n 'occupations': self.ctx.protocol['occupations'],\n },\n 'ELECTRONS': {\n 'conv_thr': conv_thr,\n }\n }", "def Params(cls):\n return hyperparams.InstantiableParams(cls)", "def psfTemplateModel(n, params):\n psf_template = params[\"psf_template\"]\n self.m_psf = psf_template\n print(\"PSF template shape\", np.shape(psf_template))\n dim = int(n)\n m = 
np.shape(psf_template)[0]\n #if m != dim:\n # raise ValueError(\"PSF template dimension not equal patch size\")\n \n if np.sum(psf_template) != 1:\n print(\"Normalizing PSF template to sum = 1\")\n psf_template = psf_template/np.sum(psf_template) \n return psf_template", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def pre_instance_ip_create(self, resource_dict):\n pass", "def ModifyInitialProperties(self):\n super().ModifyInitialProperties()\n for aux_process in self.project_parameters[\"processes\"][\"auxiliar_process_list\"]:\n if aux_process[\"python_module\"].GetString() == \"temporal_statistics_process\":\n aux_process[\"Parameters\"][\"statistics_start_point_control_value\"].SetDouble(self.project_parameters[\"problem_data\"][\"burnin_time\"].GetDouble())", "def set_parameters(self,params):\n K3Supervisor.set_parameters(self,params)\n self.blending.set_parameters(self.parameters)", "def updateParameters(self, parameters):\n\t\treturn", "def set_parameters(targeted_flag='true',\r\n tv_flag='false',\r\n hinge_flag='true',\r\n cos_flag='false',\r\n interpolation='bilinear',\r\n model_type='small',\r\n loss_type='center',\r\n dataset_type='vgg',\r\n attack='CW',\r\n norm='2',\r\n epsilon=0.1,\r\n iterations=100,\r\n binary_steps=8,\r\n learning_rate=0.01,\r\n epsilon_steps=0.01,\r\n init_const=0.3,\r\n mean_loss='embeddingmean',\r\n batch_size=-1,\r\n margin=5.0,\r\n amplification=2.0):\r\n params = {}\r\n\r\n params['model_type'] = model_type\r\n params['loss_type'] = loss_type\r\n params['dataset_type'] = dataset_type\r\n params['attack'] = attack\r\n params['norm'] = norm\r\n params['epsilon'] = epsilon\r\n params['iterations'] = iterations\r\n params['binary_steps'] = binary_steps\r\n params['learning_rate'] = learning_rate\r\n params['epsilon_steps'] = epsilon_steps\r\n params['init_const'] 
= init_const\r\n params['mean_loss'] = mean_loss\r\n params['batch_size'] = batch_size\r\n params['targeted_flag'] = string_to_bool(targeted_flag)\r\n params['tv_flag'] = string_to_bool(tv_flag)\r\n params['hinge_flag'] = string_to_bool(hinge_flag)\r\n params['cos_flag'] = string_to_bool(cos_flag)\r\n params['margin'] = margin\r\n params['amp'] = amplification\r\n\r\n if model_type == 'small' and loss_type == 'center':\r\n params['pixel_max'] = 1.0\r\n params['pixel_min'] = -1.0\r\n else:\r\n params['pixel_max'] = 1.0\r\n params['pixel_min'] = 0.0\r\n\r\n if (dataset_type == 'vggsmall'):\r\n params['align_dir'] = VGG_ALIGN_160_DIR\r\n params['test_dir'] = VGG_TEST_DIR\r\n elif model_type == 'large' or dataset_type == 'casia':\r\n params['align_dir'] = ALIGN_160_DIR\r\n elif model_type == 'small':\r\n params['align_dir'] = ALIGN_96_DIR\r\n else:\r\n ValueError('ValueError: Argument must be either \"small\" or \"large\".')\r\n \r\n if interpolation == 'nearest':\r\n params['interpolation'] = cv2.INTER_NEAREST\r\n elif interpolation == 'bilinear':\r\n params['interpolation'] = cv2.INTER_LINEAR\r\n elif interpolation == 'bicubic':\r\n params['interpolation'] = cv2.INTER_CUBIC\r\n elif interpolation == 'lanczos':\r\n params['interpolation'] = cv2.INTER_LANCZOS4\r\n elif interpolation == 'super':\r\n print('finish later')\r\n else:\r\n raise ValueError('ValueError: Argument must be of the following, [nearest, bilinear, bicubic, lanczos, super].')\r\n\r\n if params['hinge_flag']:\r\n params['attack_loss'] = 'hinge'\r\n else:\r\n params['attack_loss'] = 'target'\r\n if not params['targeted_flag']:\r\n params['attack_loss'] = 'target'\r\n if norm == 'inf':\r\n norm_name = 'i'\r\n else:\r\n norm_name = '2'\r\n if params['tv_flag']:\r\n tv_name = '_tv'\r\n else:\r\n tv_name = ''\r\n if params['cos_flag']:\r\n cos_name = '_cos'\r\n else:\r\n cos_name = ''\r\n\r\n params['model_name'] = '{}_{}'.format(model_type, loss_type)\r\n if dataset_type == 'casia' or dataset_type == 'vggsmall':\r\n params['model_name'] = dataset_type\r\n params['attack_name'] = '{}_l{}{}{}'.format(attack.lower(), norm_name, tv_name, cos_name)\r\n\r\n return params", "def updateParameters(self, parameters):", "def generate_template_dict(self):\n # Get the existing parameters\n params = super().generate_template_dict()\n\n # Add our custom parameters\n params['job_parameter_file'] = self.job_parameter_file\n params['job_output_directory'] = self.job_output_directory\n\n # Return the updated params\n return params", "def pause(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n if vmrun.pause() is None:\n puts_err(colored.red(\"Not paused\", vmrun))\n else:\n puts_err(colored.yellow(\"Paused\", vmrun))", "def parameters(self):\n return {\"W\": self.W,\n \"T\": self.T,\n \"P\": self.P}", "def pre_instance_ip_update(self, resource_id, resource_dict):\n pass", "def test_meta_template_parameters(test_vpc_layer):\n assert test_vpc_layer.user_params.get('template_parameter') is not None\n assert test_vpc_layer.meta['parameters']['dummy_parameter']['value'] ==\\\n test_vpc_layer.user_params['template_parameter']", "def SetVariationalParameters(self, data):\n self._SetParameters(data, 'SetVariationalParameters')", "def output_parameters(self):\n output_params = get_data_node(\n 'parameter',\n dict={\n 'stress': self.vasprun_obj.ionic_steps[-1]['stress'],\n 'efermi': self.vasprun_obj.efermi,\n 'energy': 
self.vasprun_obj.final_energy\n })\n return output_params", "def reset_parameters(self):\n\n for layer in self.layers:\n layer.reset_parameters()", "def rescale_all(self):\n for param_code in self.parameters.keys():\n self.rescale_parameter(param_code)", "def reset_parameters(self):\n self.embedding.reset_parameters()\n self.init_embedding()", "def params(self):\n return {'cfg': self.cfg,\n 'momentum': self.momentum,\n 'center': self.center,\n 'scale': self.scale,\n 'epsilon': self.epsilon,\n 'act_fn': self.act_fn}", "def parameters(self) -> Mapping[str, str]:\n return pulumi.get(self, \"parameters\")", "def edit_parameters(parameterized,with_apply=True,**params):\n if not with_apply:\n pf_class = ParametersFrame\n else:\n pf_class = ParametersFrameWithApply\n\n return pf_class(T.Toplevel(),parameterized,**params)", "def set_parameters(self, **kwargs):\n self.__multi_layer_perceptron.set_params(**kwargs)", "def _pprint_params(self):\n return {'x_range': self.x_range, 'y_range': self.y_range,\n 'step': self.step, 'shape': self.shape,\n 'type': self.type}", "def ps(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, self.user, self.password)\n print(vmrun.listProcessesInGuest())", "def update_parameters(self):\n # We update gamma, gamma0, lambda and nu in turn (Bottolo et al, 2011)\n self.update_gamma()\n self.update_gamma0()\n self.update_lambda()\n self.update_nu()\n if self.sample_xi:\n self.update_xi()", "def preprocess(self, instances, stats=None, **kwargs):\n pass", "def generative_parameters(self):\n params = nn.ParameterList()\n if 'parameters' in dir(self.generative_model):\n params.extend(list(self.generative_model.parameters()))\n params.extend(list(self.latent.generative_parameters()))\n return params", "def parameters(self):\n return {\"P\": self.P,\n \"T\": self.T}", "def provision(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, self.user, self.password)\n\n if not vmrun.installedTools():\n puts_err(colored.red(\"Tools not installed\"))\n return\n\n provisioned = 0\n for i, provision in enumerate(self.get('provision', [])):\n\n if provision.get('type') == 'file':\n source = provision.get('source')\n destination = provision.get('destination')\n if utils.provision_file(vmrun, source, destination) is None:\n puts_err(colored.red(\"Not Provisioned\"))\n return\n provisioned += 1\n\n elif provision.get('type') == 'shell':\n inline = provision.get('inline')\n path = provision.get('path')\n args = provision.get('args')\n if not isinstance(args, list):\n args = [args]\n if utils.provision_shell(vmrun, inline, path, args) is None:\n puts_err(colored.red(\"Not Provisioned\"))\n return\n provisioned += 1\n\n else:\n puts_err(colored.red(\"Not Provisioned ({}\".format(i)))\n return\n else:\n puts_err(colored.green(\"Provisioned {} entries\".format(provisioned)))\n return\n\n puts_err(colored.red(\"Not Provisioned ({}\".format(i)))", "def required_parameters(self):\n return ['seed', 'run_params']", "def generate_ideal(self):\n return StageParameters(self, *self._ideal_values())", "def update_params(self):\n pass", "def create_params(self):\n\n params = {'time_step':\n DesignParameter('time_step',\n unit='s',\n description='Time step with which the component model will be discretized'),\n 'horizon':\n DesignParameter('horizon',\n unit='s',\n description='Horizon of the optimization problem'),\n 'lines': 
DesignParameter('lines',\n unit='-',\n description='List of names of the lines that can be found in the network, e.g. '\n '\\'supply\\' and \\'return\\'',\n val=['supply', 'return'])\n }\n return params", "def do_overcloud_show_template_parameters(tuskar, args, outfile=sys.stdout):\n template_parameters = tuskar.overclouds.template_parameters()\n formatters = {\n '*': fmt.attributes_formatter\n }\n template_parameters_dict = template_parameters.to_dict()\n fmt.print_dict(template_parameters_dict, formatters, outfile=outfile)", "def set_params(self, params: Dict):\n\n if params['training_instances'] is not None:\n self.training_instances = params['training_instances']\n if params['n'] is not None:\n self.n = params['n']\n if params['lda'] is not None:\n self.lda = params['lda']\n if params['verbose'] is not None:\n self.verbose = params['verbose']\n\n self.num_features = self.training_instances[0].get_feature_count()\n self.w = None\n self.b = None", "def hyperparameters(self):\n hyperparameters = super(TensorFlow, self).hyperparameters()\n\n self.checkpoint_path = self.checkpoint_path or self._default_s3_path('checkpoints')\n mpi_enabled = False\n\n if self._script_mode_enabled():\n additional_hyperparameters = {}\n\n if 'parameter_server' in self.distributions:\n ps_enabled = self.distributions['parameter_server'].get('enabled', False)\n additional_hyperparameters[self.LAUNCH_PS_ENV_NAME] = ps_enabled\n\n if 'mpi' in self.distributions:\n mpi_dict = self.distributions['mpi']\n mpi_enabled = mpi_dict.get('enabled', False)\n additional_hyperparameters[self.LAUNCH_MPI_ENV_NAME] = mpi_enabled\n additional_hyperparameters[self.MPI_NUM_PROCESSES_PER_HOST] = mpi_dict.get('processes_per_host', 1)\n additional_hyperparameters[self.MPI_CUSTOM_MPI_OPTIONS] = mpi_dict.get('custom_mpi_options', '')\n\n self.model_dir = self.model_dir or self._default_s3_path('model', mpi=mpi_enabled)\n additional_hyperparameters['model_dir'] = self.model_dir\n else:\n additional_hyperparameters = {'checkpoint_path': self.checkpoint_path,\n 'training_steps': self.training_steps,\n 'evaluation_steps': self.evaluation_steps,\n 'sagemaker_requirements': self.requirements_file}\n\n hyperparameters.update(Framework._json_encode_hyperparameters(additional_hyperparameters))\n return hyperparameters", "def parameters(self) -> dict:\n return self._config.get('parameters', dict())", "def get_parameters(self):\r\n raise Exception(\"Not implemented (server-side parameter initialization)\")", "def postprocess_hyperparams(args, config):\n pass" ]
[ "0.5347873", "0.5297021", "0.5158198", "0.51435643", "0.5138627", "0.5135741", "0.5132744", "0.51161265", "0.5107304", "0.5071189", "0.5058572", "0.5052652", "0.4967741", "0.4967741", "0.4954043", "0.49358413", "0.49351642", "0.4931862", "0.49299216", "0.49299216", "0.49299216", "0.49299216", "0.49299216", "0.49299216", "0.49299216", "0.49299216", "0.4901995", "0.49008888", "0.4896909", "0.48964643", "0.4895922", "0.4895922", "0.4895922", "0.4895922", "0.4895922", "0.4895922", "0.4895922", "0.4895922", "0.4895922", "0.4895922", "0.4895922", "0.4895922", "0.4895922", "0.4895922", "0.4895922", "0.4895922", "0.4895922", "0.4895922", "0.4895922", "0.4895922", "0.4895922", "0.4895922", "0.4895922", "0.4895922", "0.4895922", "0.4895922", "0.4895922", "0.4895922", "0.4895922", "0.4895922", "0.4895922", "0.4895922", "0.4895922", "0.4895922", "0.4880292", "0.48629472", "0.48587498", "0.48510727", "0.48397157", "0.48263827", "0.482428", "0.481366", "0.47989553", "0.47970948", "0.4791365", "0.47843313", "0.4784199", "0.47826296", "0.4769905", "0.476942", "0.475892", "0.47571945", "0.47535318", "0.4745499", "0.47449648", "0.47432965", "0.4742853", "0.47377968", "0.473302", "0.47041565", "0.47034913", "0.46996567", "0.4689709", "0.4680922", "0.46802485", "0.46793935", "0.46789178", "0.46783614", "0.46762586", "0.46762443", "0.4676211" ]
0.0
-1
Resource Management allows you to build an organizational structure for resources based on your business requirements. You can use resource directories, folders, accounts, and resource groups to hierarchically organize and manage resources. For more information, see [What is Resource Management?](~~94475~~)
def modify_resource_group_with_options( self, request: dds_20151201_models.ModifyResourceGroupRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.ModifyResourceGroupResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.region_id): query['RegionId'] = request.region_id if not UtilClient.is_unset(request.resource_group_id): query['ResourceGroupId'] = request.resource_group_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='ModifyResourceGroup', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.ModifyResourceGroupResponse(), self.call_api(params, req, runtime) )
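A hedged sketch of moving an instance into a different resource group with the synchronous method above. The same alibabacloud-* package-layout and endpoint assumptions apply, and the instance ID and resource group ID are placeholders, not real identifiers.

import os

from alibabacloud_dds20151201.client import Client
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util import models as util_models


def move_instance_to_resource_group() -> None:
    # Credentials, region, and endpoint are placeholders; adjust to your account.
    config = open_api_models.Config(
        access_key_id=os.environ['ALIBABA_CLOUD_ACCESS_KEY_ID'],
        access_key_secret=os.environ['ALIBABA_CLOUD_ACCESS_KEY_SECRET'],
        region_id='cn-hangzhou',
    )
    config.endpoint = 'mongodb.aliyuncs.com'  # assumed public endpoint
    client = Client(config)

    # All IDs below are placeholders.
    request = dds_20151201_models.ModifyResourceGroupRequest(
        dbinstance_id='dds-bp1xxxxxxxxxxxxx',
        resource_group_id='rg-aekzxxxxxxxxxxx',
        region_id='cn-hangzhou',
    )
    runtime = util_models.RuntimeOptions()
    response = client.modify_resource_group_with_options(request, runtime)
    print(response.body)


if __name__ == '__main__':
    move_instance_to_resource_group()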
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resources(self):", "def gen_resources(self):\n\n print \"\\t* Adding resources to compute template\"\n\n # add all the nets and subnets\n self.gen_net_resources()\n\n # add all routers\n self.gen_router_resources()\n\n # add all servers/intances\n self.gen_server_resources()", "def resources():\n check_resources()", "def assemble_resource_directories(project, base_dir):\n resource_path = os.path.join(base_dir, project.resources_path)\n os.makedirs(os.path.join(resource_path, 'images'))\n os.makedirs(os.path.join(resource_path, 'fonts'))\n os.makedirs(os.path.join(resource_path, 'data'))", "def resource_prefix(self):", "def resource_mapping():\n return {\n 'OS::Heat::ResourceChain': ResourceChain,\n }", "def resources(self):\n return self.__resources", "def resource_map(self):", "def test_create_namespaced_local_resource_access_review(self):\n pass", "def test_create_namespaced_resource_access_review(self):\n pass", "def ResourcePath(self, name):\n pass", "def _categorize_resource(self, resource: Resource, required_permissions: str) -> None:\n if resource.is_user_provided:\n self.resources_reused.append({\"arn\": resource.arn, \"required_permissions\": required_permissions})\n else:\n self.resources_created.append({\"arn\": resource.arn})", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n auto_devops_enabled: Optional[pulumi.Input[bool]] = None,\n avatar: Optional[pulumi.Input[str]] = None,\n avatar_hash: Optional[pulumi.Input[str]] = None,\n default_branch_protection: Optional[pulumi.Input[int]] = None,\n description: Optional[pulumi.Input[str]] = None,\n emails_disabled: Optional[pulumi.Input[bool]] = None,\n extra_shared_runners_minutes_limit: Optional[pulumi.Input[int]] = None,\n ip_restriction_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n lfs_enabled: Optional[pulumi.Input[bool]] = None,\n membership_lock: Optional[pulumi.Input[bool]] = None,\n mentions_disabled: Optional[pulumi.Input[bool]] = None,\n name: Optional[pulumi.Input[str]] = None,\n parent_id: Optional[pulumi.Input[int]] = None,\n path: Optional[pulumi.Input[str]] = None,\n prevent_forking_outside_group: Optional[pulumi.Input[bool]] = None,\n project_creation_level: Optional[pulumi.Input[str]] = None,\n request_access_enabled: Optional[pulumi.Input[bool]] = None,\n require_two_factor_authentication: Optional[pulumi.Input[bool]] = None,\n share_with_group_lock: Optional[pulumi.Input[bool]] = None,\n shared_runners_minutes_limit: Optional[pulumi.Input[int]] = None,\n subgroup_creation_level: Optional[pulumi.Input[str]] = None,\n two_factor_grace_period: Optional[pulumi.Input[int]] = None,\n visibility_level: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def resource_manager():\n return visa.ResourceManager()", "def testSubResources(self):\n\n def CountResourceTree(resource):\n ret = 0\n for r in resource._resources:\n ret += 1 + CountResourceTree(r)\n return ret\n\n api = self.ApiFromDiscoveryDoc('moderator.v1.json')\n top_level_resources = 0\n total_resources = 0\n non_method_resources = 0\n have_sub_resources = 0\n have_sub_resources_and_methods = 0\n for r in api._resources:\n top_level_resources += 1\n total_resources += 1 + CountResourceTree(r)\n if not r._methods:\n non_method_resources += 1\n if r._resources:\n have_sub_resources += 1\n if r._resources and r._methods:\n have_sub_resources_and_methods += 1\n # Hand counted 18 resources in the file.\n self.assertEquals(18, total_resources)\n self.assertEquals(11, 
top_level_resources)\n # 4 of them have no methods, only sub resources\n self.assertEquals(4, non_method_resources)\n # 6 of them have sub resources.\n self.assertEquals(6, have_sub_resources)\n # And, of course, 2 should have both sub resources and methods\n self.assertEquals(2, have_sub_resources_and_methods)", "def test_create_local_resource_access_review_for_all_namespaces(self):\n pass", "def GetResourceAclSample():\n client = CreateClient()\n for resource in client.GetResources(limit=5).entry:\n acl_feed = client.GetResourceAcl(resource)\n for acl in acl_feed.entry:\n print acl.role.value, acl.scope.type, acl.scope.value", "def test_objectresource_resourcenameforuid(self):\n\n home01 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=\"user01\", create=True)\n self.assertTrue(home01 is not None)\n calendar01 = yield home01.childWithName(\"calendar\")\n yield calendar01.createCalendarObjectWithName(\"1.ics\", Component.fromString(self.caldata1))\n yield calendar01.createCalendarObjectWithName(\"2.ics\", Component.fromString(self.caldata2))\n yield self.commitTransaction(0)\n\n home = yield self._remoteHome(self.theTransactionUnderTest(1), \"user01\")\n self.assertTrue(home is not None)\n calendar = yield home.childWithName(\"calendar\")\n\n name = yield calendar.resourceNameForUID(\"uid1\")\n self.assertEqual(name, \"1.ics\")\n\n name = yield calendar.resourceNameForUID(\"uid2\")\n self.assertEqual(name, \"2.ics\")\n\n name = yield calendar.resourceNameForUID(\"foo\")\n self.assertEqual(name, None)\n\n yield self.commitTransaction(1)", "def test_create_resource_group(self):\n pass", "def create_resource_object():\n\n # Create two objects of different users and same center code\n Resource.objects.create(status=0, title='Recurso de teste (BR1.1)', \n link='http://bvsalud.org', originator='BIREME',\n created_by_id=1, cooperative_center_code='BR1.1')\n \n Resource.objects.create(status=0, title='Recurso de teste (BR1.1)', \n link='http://bvsalud.org', originator='BIREME',\n created_by_id=2, cooperative_center_code='BR1.1')\n\n # Create one object of diffent center code\n Resource.objects.create(status=0, title='Recurso de teste (PY3.1)', \n link='http://bvsalud.org', originator='BIREME',\n created_by_id=3, cooperative_center_code='PY3.1')\n\n\n # add descriptor and thematic area for resource pk 1\n object_ct = ContentType.objects.get_for_model(Resource)\n descriptor = Descriptor.objects.create(object_id=1, content_type=object_ct, text='descritor 1')\n keyword = Keyword.objects.create(object_id=1, content_type=object_ct, text='keyword 1')\n thematic = ResourceThematic.objects.create(object_id=1, content_type=object_ct, thematic_area_id=1)", "def getResources(self, folder):\n\n #-------------------- \n # Get the resource JSON\n #-------------------- \n folder += \"/resources\"\n resources = self.__getJson(folder)\n #print(\"%s %s\"%(, folder))\n #print(\" Got resources: '%s'\"%(str(resources)))\n\n\n\n #-------------------- \n # Filter the JSONs\n #-------------------- \n resourceNames = []\n for r in resources:\n if 'label' in r:\n resourceNames.append(r['label'])\n #print(\"FOUND RESOURCE ('%s') : %s\"%(folder, r['label']))\n elif 'Name' in r:\n resourceNames.append(r['Name'])\n #print(\"FOUND RESOURCE ('%s') : %s\"%(folder, r['Name']))\n\n return resourceNames", "def resources(request):\n projects, secrets, pools, storageclasses, pvcs, pods = ([] for i in range(6))\n\n def finalizer():\n \"\"\"\n Delete the resources created during the test\n \"\"\"\n for resource_type 
in pods, pvcs, storageclasses, secrets:\n for resource in resource_type:\n resource.delete()\n resource.ocp.wait_for_delete(resource.name)\n if pools:\n # Delete only the RBD pool\n pools[0].delete()\n if projects:\n for project in projects:\n project.delete(resource_name=project.namespace)\n project.wait_for_delete(project.namespace)\n\n request.addfinalizer(finalizer)\n\n return projects, secrets, pools, storageclasses, pvcs, pods", "def init_physical_resources():\n test_physical_resources = []\n\n # add info to list in memory, one by one, following signature values\n phys_resrc_ID = 1\n phys_resrc_name = \"small-cavium-1\"\n phys_resrc_info = \"Jump server in Arm pod, 48 cores, 64G RAM, 447G SSD, aarch64 Cavium ThunderX, Ubuntu OS\"\n phys_resrc_IPAddress = \"10.10.50.12\"\n phys_resrc_MACAddress = \"00-14-22-01-23-45\"\n\n test_physical_resources.append(PhysicalResource(phys_resrc_ID, phys_resrc_name,\n phys_resrc_info,\n phys_resrc_IPAddress,\n phys_resrc_MACAddress))\n\n phys_resrc_ID = 2\n phys_resrc_name = \"medium-cavium-1\"\n phys_resrc_info = \"Jump server in New York pod, 96 cores, 64G RAM, 447G SSD, aarch64 Cavium ThunderX, Ubuntu OS\"\n phys_resrc_IPAddress = \"30.31.32.33\"\n phys_resrc_MACAddress = \"0xb3:22:05:c1:aa:82\"\n\n test_physical_resources.append(PhysicalResource(phys_resrc_ID, phys_resrc_name,\n phys_resrc_info,\n phys_resrc_IPAddress,\n phys_resrc_MACAddress))\n\n phys_resrc_ID = 3\n phys_resrc_name = \"mega-cavium-666\"\n phys_resrc_info = \"Jump server in Las Vegas, 1024 cores, 1024G RAM, 6666G SSD, aarch64 Cavium ThunderX, Ubuntu OS\"\n phys_resrc_IPAddress = \"54.53.52.51\"\n phys_resrc_MACAddress = \"01-23-45-67-89-ab\"\n\n test_physical_resources.append(PhysicalResource(phys_resrc_ID, phys_resrc_name,\n phys_resrc_info,\n phys_resrc_IPAddress,\n phys_resrc_MACAddress))\n\n\n # write list to binary file\n write_list_bin(test_physical_resources, FILE_PHYSICAL_RESOURCES)\n\n return test_physical_resources", "def CreateResourceInCollectionSample():\n client = CreateClient()\n col = gdata.docs.data.Resource(type='folder', title='My Sample Folder')\n col = client.CreateResource(col)\n print 'Created collection:', col.title.text, col.resource_id.text\n doc = gdata.docs.data.Resource(type='document', title='My Sample Doc')\n doc = client.CreateResource(doc, collection=col)\n print 'Created:', doc.title.text, doc.resource_id.text", "def test_getResourceRelations(self):\n pass", "def collect_resources(namespace, output_dir, api_resources, k8s_cli_input=\"\", selector=\"\"):\n set_file_logger(output_dir)\n k8s_cli = detect_k8s_cli(k8s_cli_input)\n ns_output_dir = os.path.join(output_dir, namespace)\n make_dir(ns_output_dir)\n collect_api_resources(namespace, ns_output_dir, k8s_cli, api_resources, selector)\n collect_api_resources_description(namespace, ns_output_dir, k8s_cli, api_resources, selector)\n collect_pods_logs(namespace, ns_output_dir, k8s_cli, logs_from_all_pods=True)", "def __init__(__self__, resource_name, opts=None, attributes=None, name=None, parent_id=None, realm_id=None, __props__=None, __name__=None, __opts__=None):\n if __name__ is not None:\n warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning)\n resource_name = __name__\n if __opts__ is not None:\n warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning)\n opts = __opts__\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a 
ResourceOptions instance')\n if opts.version is None:\n opts.version = utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = dict()\n\n __props__['attributes'] = attributes\n __props__['name'] = name\n __props__['parent_id'] = parent_id\n if realm_id is None:\n raise TypeError(\"Missing required property 'realm_id'\")\n __props__['realm_id'] = realm_id\n __props__['path'] = None\n super(Group, __self__).__init__(\n 'keycloak:index/group:Group',\n resource_name,\n __props__,\n opts)", "def create_resources(self) -> List[ResourceDescription]:\r\n return self.resources", "def test_create_resource_access_review_for_all_namespaces(self):\n pass", "def resource(self, *path):\n # TODO(vadimsh): Verify that file exists. Including a case like:\n # module.resource('dir').join('subdir', 'file.py')\n return self._module.RESOURCE_DIRECTORY.join(*path)", "def create_sagemaker_resource(\n resource_plural, resource_name, spec_file, replacements, namespace=\"default\"\n):\n\n reference, spec, resource = k8s.load_and_create_resource(\n resource_directory,\n CRD_GROUP,\n CRD_VERSION,\n resource_plural,\n resource_name,\n spec_file,\n replacements,\n namespace,\n )\n\n return reference, spec, resource", "def __init__(self, basedir=None):\n # ------------------------------------------------------------------------\n super(Resources, self).__init__()\n self.xInitialize(basedir or \"resources\")", "def generate_config(context):\n\n properties = context.properties\n\n base_resource = get_type(context)\n\n resources = []\n\n if 'dependsOn' in properties:\n dependson = {'metadata': {'dependsOn': properties['dependsOn']}}\n dependson_root = properties['dependsOn']\n else:\n dependson = {}\n dependson_root = []\n\n for role in properties['roles']:\n for member in role['members']:\n suffix = sha1(\n '{}-{}'.format(role['role'], member).encode('utf-8')).hexdigest()[:10]\n policy_get_name = '{}-{}'.format(context.env['name'], suffix)\n\n resource_name = '{}-{}'.format(policy_get_name,\n base_resource['postfix'])\n iam_resource = {\n 'name': resource_name,\n # TODO - Virtual type documentation needed\n 'type': base_resource['dm_type'],\n 'properties': {\n base_resource['dm_resource_property']: base_resource['id'],\n 'role': role['role'],\n 'member': member,\n }\n }\n iam_resource.update(dependson)\n resources.append(iam_resource)\n\n dependson = {'metadata': {'dependsOn': [\n resource_name] + dependson_root}}\n\n return {\"resources\": resources}", "def resources(stack, region, profile):\n logging.debug(f'finding resources - stack: {stack}')\n logging.debug(f'region: {region}')\n logging.debug(f'profile: {profile}')\n tool = ResourceTool(\n Stack=stack,\n Region=region,\n Profile=profile,\n Verbose=True\n )\n\n if tool.list_resources():\n sys.exit(0)\n else:\n sys.exit(1)", "def subdir(self):", "def get_recipe_resource():\n return os.getenv(\"DKU_CUSTOM_RESOURCE_FOLDER\")", "def get_resource_path():\n return os.path.join(os.path.dirname(__file__), \"resources\") + os.path.sep", "def resource_path(name):\n return os.path.join(\n os.path.dirname(__file__), 'images', 'resource', name)", "def __init__(__self__,\n resource_name: str,\n args: Optional[AclArgs] = None,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def link_resources(ctx):\n\n for resource in RESOURCES:\n\n command = \"ln -s -r -f -T {res}/{resource} {proj}/{resource}\".format(\n 
res=RESOURCE_DIR,\n proj=PROJECT_DIR,\n resource=resource)\n\n print(\"Running\")\n print(command)\n print(\"-----------------------------\")\n ctx.run(command)", "def format_resource_tree(\n nested_resources, # type: NestedResourceNodes\n db_session, # type: Session\n resources_perms_dict=None, # type: Optional[ResourcePermissionMap]\n permission_type=None, # type: Optional[PermissionType]\n nesting_key=\"children\", # type: NestingKeyType\n): # type: (...) -> JSON\n # optimization to avoid re-lookup of 'allowed permissions' when already fetched\n # unused when parsing 'applied permissions'\n __internal_svc_res_perm_dict = {}\n\n def recursive_fmt_res_tree(nested_dict): # type: (NestedResourceNodes) -> JSON\n fmt_res_tree = {}\n for child_id, child_dict in nested_dict.items():\n resource = child_dict[\"node\"]\n # nested nodes always use 'children' regardless of nested-key\n # nested-key employed in the generated format will indicate the real resource parents/children relationship\n new_nested = child_dict[\"children\"]\n perms = []\n\n # case of pre-specified user/group-specific permissions\n if resources_perms_dict is not None:\n if resource.resource_id in resources_perms_dict.keys():\n perms = resources_perms_dict[resource.resource_id]\n\n # case of full fetch (allowed resource permissions)\n else:\n # directly access the resource if it is a service\n service = None # type: Optional[Service]\n if resource.root_service_id is None:\n service = resource\n service_id = resource.resource_id\n # obtain corresponding top-level service resource if not already available,\n # get resource permissions allowed under the top service's scope\n else:\n service_id = resource.root_service_id\n if service_id not in __internal_svc_res_perm_dict:\n service = ResourceService.by_resource_id(service_id, db_session=db_session)\n # add to dict only if not already added\n if service is not None and service_id not in __internal_svc_res_perm_dict:\n __internal_svc_res_perm_dict[service_id] = {\n res_type.resource_type_name: res_perms # use str key to match below 'resource_type' field\n for res_type, res_perms in SERVICE_TYPE_DICT[service.type].resource_types_permissions.items()\n }\n # in case of inverse nesting, service could be at \"bottom\"\n # retrieve its permissions directly since its type is never expected nested under itself\n res_type_name = resource.resource_type # type: Str\n if res_type_name == \"service\":\n perms = SERVICE_TYPE_DICT[service.type].permissions\n else:\n perms = __internal_svc_res_perm_dict[service_id][resource.resource_type]\n\n fmt_res_tree[child_id] = format_resource(resource, perms, permission_type)\n fmt_res_tree[child_id][nesting_key] = recursive_fmt_res_tree(new_nested)\n return fmt_res_tree\n\n return recursive_fmt_res_tree(nested_resources)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n account_id: Optional[pulumi.Input[str]] = None,\n group: Optional[pulumi.Input[str]] = None,\n image_id: Optional[pulumi.Input[str]] = None,\n organization_arn: Optional[pulumi.Input[str]] = None,\n organizational_unit_arn: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def assemble_resources(base_dir, resource_path, resources, type_restrictions=None):\n for f in resources:\n if type_restrictions and f.kind not in type_restrictions:\n continue\n target_dir = os.path.abspath(os.path.join(base_dir, resource_path, ResourceFile.DIR_MAP[f.kind]))\n f.copy_all_variants_to_dir(target_dir)", "def 
create_resource_config_files(host_config, resource_config, type_map, bus_map, trecs_root_dir, output_dir, resource_config_dir, model_listen_port, agent_listen_port):\n for host in host_config:\n if host['host_type'] != 'RA':\n continue\n\n resource_name = host['attached_resource_name']\n\n init_data = {\n 'RA': {\n 'ip': '127.0.0.1',\n 'listen_port': agent_listen_port\n },\n 'bus_index': bus_map[resource_name],\n 'listen_port': model_listen_port,\n 'log_path': path.join(output_dir, 'csv', '{}.csv'.format(resource_name))\n }\n\n resource = next(resource for resource in resource_config['resources'] if resource['resource_name'] == resource_name)\n for key in resource.keys():\n if key.endswith('_path'):\n cwd = getcwd()\n chdir(resource_config_dir)\n resource[key] = path.abspath(resource[key])\n chdir(cwd)\n\n final_config = init_data.copy()\n final_config.update(resource)\n\n config_file_name = '{}_config.json'.format(resource_name)\n with open(\n path.join(trecs_root_dir, 'run', config_file_name), 'w'\n ) as init_file:\n dump(final_config, init_file)", "def test_resource_path(self):\n\n # Without arguments\n resources_root_path = os.path.abspath(os.path.join(\n MY_DIRECTORY, '..', '..', 'resources'\n ))\n self.assertEqual(resources_root_path, paths.resource())", "def resources(self, resources):\n self._resources = resources", "def load(self):\n self.suite.load()\n self.resource_map = {}\n dirlist = os.listdir(self.resources)\n for resource_name in (name for name in dirlist\n if os.path.isfile(os.path.join(self.resources,name)) and\n os.path.splitext(name)[1].lower() == '.fbr'):\n try:\n f = open(os.path.join(self.resources,resource_name),'rU')\n expr = f.read()\n d = eval(expr)\n resource_id = os.path.splitext(resource_name)[0].lower()\n d['id'] = resource_id\n kind = d['kind']\n del d['kind']\n self.resource_map[resource_id] = Resource.create(kind,**d)\n finally:\n f.close()", "def get_resource_dir(cls) -> str:\n return os.path.join(\n os.path.realpath(os.path.dirname(__file__)),\n os.pardir,\n os.pardir,\n os.pardir,\n \"gem5\",\n \"resources\",\n )", "def test_get_deployment_resources(self):\n pass", "def is_reserved_resource(self, work_dir: str, resource: str) -> bool:\n resource_dir = resource.split(\"/\")[0] if \"/\" in resource else resource\n if resource.startswith(\".resumables-\") and resource.endswith(\".db\"):\n logging.error(f\"resumable dbs not accessible {resource}\")\n return True\n elif re.match(r\"(.+)\\.([a-f\\d0-9-]{32,36})$\", resource):\n logging.error(\"merged resumable files not accessible\")\n return True\n elif re.match(r\"(.+).([a-f\\d0-9-]{32,36}).part$\", resource):\n logging.error(\"partial upload files not accessible\")\n return True\n elif VALID_UUID.match(resource_dir):\n potential_target = os.path.normpath(f\"{work_dir}/{resource_dir}\")\n if os.path.lexists(potential_target) and os.path.isdir(potential_target):\n content = os.listdir(potential_target)\n for entry in content:\n if re.match(r\"(.+).chunk.[0-9]+$\", entry):\n logging.error(f\"resumable directories not accessible {entry}\")\n return True\n return False", "def resource(self, n):\n\n cfg = self.read()\n\n for res in cfg.get('Resources', []):\n res_name = res.get('Resource')\n\n if res_name == n:\n return ConfigResource(res)", "def resources(self):\n return [self]", "def test_access_resource(self):\n test_resource = ResourceTypeName.get()\n role_name = 'test_role'\n resp = self.app.post(f'/v1/resource/{test_resource}', data=json.dumps({'actions': ['tr:action1']}),\n headers=admin_headers)\n 
self.assertEqual(resp.status_code, 201)\n with self.subTest(\"Permission is denied\"):\n resp = self.app.get(f'/v1/resource/{test_resource}', headers=user_header)\n self.assertEqual(resp.status_code, 403)\n\n role_request_body = {\n \"role_id\": role_name,\n \"policy\": {\n 'Statement': [{\n 'Sid': role_name,\n 'Action': [\n \"fus:DeleteResources\",\n \"fus:GetResources\"],\n 'Effect': 'Allow',\n 'Resource': [f\"arn:hca:fus:*:*:resource/{test_resource}\"]\n }]\n }\n }\n resp = self.app.post(f'/v1/role', data=json.dumps(role_request_body), headers=admin_headers)\n self.assertEqual(resp.status_code, 201)\n resp = self.app.put(f\"/v1/user/{service_accounts['user']['client_email']}/roles?action=add\",\n data=json.dumps({'roles': [role_name]}),\n headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n\n with self.subTest(\"Permission is granted\"):\n resp = self.app.get(f'/v1/resource/{test_resource}', headers=user_header)\n self.assertEqual(resp.status_code, 200)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n label: Optional[pulumi.Input[str]] = None,\n permissions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n __props__=None):\n ...", "def register_resources(self):\n raise NotImplementedError", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n domain_id: Optional[pulumi.Input[str]] = None,\n group_id: Optional[pulumi.Input[str]] = None,\n project_id: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n role_id: Optional[pulumi.Input[str]] = None,\n user_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def register_dcc_resource_path(resources_path):\n\n pass", "def set_resources():\n global available_resources\n global EdgenodeResources\n recv_json = request.get_json()\n for resourcename, value in recv_json.items():\n available_resources[resourcename] = value\n # TODO make this better\n EdgenodeResources = [TaskResources(ram=int(available_resources['RAM']), cpu=int(\n available_resources['CPU']), hdd=int(available_resources['HDD'])), available_resources['DATA']]\n\n print 'Available resources set to', EdgenodeResources\n return 'Available resources set to ' + str(available_resources)", "def resourceManager(*args, nameFilter: AnyStr=\"\", saveAs: List[AnyStr, AnyStr]=None,\n **kwargs)->None:\n pass", "def resources(filename):\n return send_from_directory(\"resources\", filename)", "def GetAllResourcesSample():\n client = CreateClient()\n # Unlike client.GetResources, this returns a list of resources\n for resource in client.GetAllResources():\n PrintResource(resource)", "def get_recipe_resource():\n return os.getenv(\"SKU_CUSTOM_RECIPE_RESOURCE_FOLDER\")", "def add_resources(event):\n anuket_resources.need()", "def resources(self) -> HTMLBody:\n\t\treturn render_template(\"resources.jinja2\")", "def get_resources(self, resource_data=None):\n if not resource_data and self.component:\n resource_data = self.component.get_resource_data()\n\n resources = []\n for resource in self.files:\n resource.update(resource_data)\n\n resource['storage_path'] = self.prefix + '/' + resource['name']\n relative_path = self.relative_path(data=resource)\n resource['relative_path'] = relative_path\n resource['url'] = resource['url'] + '/' + relative_path\n resources.append(resource)\n return resources", "def test_create_cluster_resource_quota(self):\n pass", "def __init__(__self__,\n 
resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n etag: Optional[pulumi.Input[str]] = None,\n file_shares: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FileShareConfigArgs']]]]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n kms_key_name: Optional[pulumi.Input[str]] = None,\n labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n location: Optional[pulumi.Input[str]] = None,\n networks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NetworkConfigArgs']]]]] = None,\n project: Optional[pulumi.Input[str]] = None,\n tier: Optional[pulumi.Input['InstanceTier']] = None,\n __props__=None):\n ...", "def test_sm_resource_object(self):\n\n # Add a faked storage_group\n faked_storage_group = self.add_storage_group1()\n storage_group_oid = faked_storage_group.oid\n\n storage_group_mgr = self.console.storage_groups\n\n # Execute the code to be tested\n storage_group = storage_group_mgr.resource_object(storage_group_oid)\n\n storage_group_uri = \"/api/storage-groups/\" + storage_group_oid\n\n sv_mgr = storage_group.storage_volumes\n vsr_mgr = storage_group.virtual_storage_resources\n\n assert isinstance(storage_group, StorageGroup)\n assert isinstance(sv_mgr, StorageVolumeManager)\n assert isinstance(vsr_mgr, VirtualStorageResourceManager)\n\n sg_cpc = storage_group.cpc\n assert isinstance(sg_cpc, Cpc)\n assert sg_cpc.uri == storage_group.properties['cpc-uri']\n\n # Note: Properties inherited from BaseResource are tested there,\n # but we test them again:\n assert storage_group.properties['object-uri'] == storage_group_uri\n assert storage_group.properties['object-id'] == storage_group_oid\n assert storage_group.properties['class'] == 'storage-group'\n assert storage_group.properties['parent'] == self.console.uri", "def gen_router_resources(self):\n\n print \"\\t* Adding router resources to compute template\"\n\n from nova import version\n year = version.version_string()\n\n for idx, router in enumerate(self.tenant_routers):\n router_ports = []\n for port in self.all_ports:\n if router[\"id\"] == port[\"device_id\"]:\n router_ports.append(port)\n\n # add the router definition\n if \"2013\" in year:\n # Havana Format\n data = {\"type\": \"OS::Neutron::Router\"}\n self.compute_data[\"resources\"][\"router%s\" % str(idx)] = data\n\n # routers without external gateway\n if router[\"external_gateway_info\"] is not None:\n\n name = {\"get_resource\": \"router%s\" % str(idx)}\n netid = {\"get_param\": \"public_net_%s\" % str(idx)}\n\n # add the router gateway\n data = {\"type\": \"OS::Neutron::RouterGateway\",\n \"properties\": {\n \"router_id\": name,\n \"network_id\": netid\n }}\n\n self.compute_data[\"resources\"][\"router_gateway%s\" % str(idx)] = data\n\n else:\n # Icehouse Format\n rtrName = router[\"name\"]\n # routers without external gateway\n if router[\"external_gateway_info\"] is not None:\n data = {\"type\": \"OS::Neutron::Router\",\n \"properties\": {\n \"name\": rtrName,\n \"external_gateway_info\": {\n \"network\": {\n \"get_param\": \"public_net_%s\" % str(idx)\n }\n }\n }}\n else:\n data = {\"type\": \"OS::Neutron::Router\",\n \"properties\": {\n \"name\": rtrName\n }\n }\n self.compute_data[\"resources\"][\"router%s\" % str(idx)] = data\n\n # internal port information needed\n internal_interfaces = filter(lambda port: port[\"device_owner\"] == \"network:router_interface\", router_ports)\n\n for idxs, interface in enumerate(internal_interfaces):\n # add the 
router interface\n\n for fixedip in interface[\"fixed_ips\"]:\n\n # create router interface\n data = {\"type\": \"OS::Neutron::RouterInterface\",\n \"properties\": {\n \"router_id\": {\"get_resource\": \"router%s\" % str(idx)},\n \"port_id\": {\"get_resource\": \"port_%s_%s\" % (str(idx), str(idxs))}\n }}\n self.compute_data[\"resources\"][\"router_interface%s_%s\" % (str(idx), str(idxs))] = data\n\n # create router port\n network = self.neutronclient.show_subnet(fixedip[\"subnet_id\"])[\"subnet\"][\"network_id\"]\n net_name = \"%s\" % str(self.neutronclient.show_network(network)[\"network\"][\"name\"])\n net_id = self.neutronclient.show_network(network)[\"network\"][\"id\"]\n\n fixed_ips = [{\"ip_address\": fixedip[\"ip_address\"]}]\n net = self.neutronclient.show_network(network)[\"network\"]\n if net[\"shared\"] is True:\n data = {\"type\": \"OS::Neutron::Port\",\n \"properties\": {\n \"fixed_ips\": fixed_ips,\n \"network_id\": net_id\n }}\n else:\n data = {\"type\": \"OS::Neutron::Port\",\n \"properties\": {\n \"fixed_ips\": fixed_ips,\n \"network_id\": {\"get_resource\": net_name}\n }}\n self.compute_data[\"resources\"][\"port_%s_%s\" % (str(idx), str(idxs))] = data", "def set_paths(self, specs, resources):\n self.install = 'install.xml'\n self.specs_path = path_format(specs)\n self.root = path_format(dirname(dirname(self.specs_path)) + '/')\n self.res_path = path_format(resources)\n self.resources['BASE'] = self.res_path\n self.specs['BASE'] = self.specs_path", "def resources(self, resources):\n\n self._resources = resources", "def resources(self, resources):\n\n self._resources = resources", "def resources(self, resources):\n\n self._resources = resources", "def resources(self, resources):\n\n self._resources = resources", "def write_resources(self, resources):\n for filename, data in list(resources.get('outputs', {}).items()):\n # Determine where to write the file to\n dest = os.path.join(self.output_dir, filename)\n path = os.path.dirname(dest)\n if path and not os.path.isdir(path):\n os.makedirs(path)\n\n # Write file\n with open(dest, 'wb') as f:\n f.write(data)", "def CreateResources(self, manifests, region):\n resource_dict = manifest_util.ParseDeployConfig(self.messages, manifests,\n region)\n msg_template = 'Created Cloud Deploy resource: {}.'\n # Create delivery pipeline first.\n # In case user has both types of pipeline definition in the same\n # config file.\n pipelines = resource_dict[manifest_util.DELIVERY_PIPELINE_KIND_V1BETA1]\n if pipelines:\n operation_dict = {}\n for resource in pipelines:\n operation_dict[resource.name] = self.CreateDeliveryPipeline(resource)\n self.operation_client.CheckOperationStatus(operation_dict, msg_template)\n # In case user has both types of target definition in the same\n # config file.\n targets = resource_dict[manifest_util.TARGET_KIND_V1BETA1]\n if targets:\n operation_dict = {}\n for resource in targets:\n operation_dict[resource.name] = target_util.PatchTarget(resource)\n self.operation_client.CheckOperationStatus(operation_dict, msg_template)\n # Create automation resource.\n automations = resource_dict[manifest_util.AUTOMATION_KIND]\n operation_dict = {}\n for resource in automations:\n operation_dict[resource.name] = automation_util.PatchAutomation(resource)\n self.operation_client.CheckOperationStatus(operation_dict, msg_template)\n # Create custom target type resource.\n custom_target_types = resource_dict[manifest_util.CUSTOM_TARGET_TYPE_KIND]\n operation_dict = {}\n for resource in custom_target_types:\n 
operation_dict[resource.name] = (\n custom_target_type_util.PatchCustomTargetType(resource)\n )\n self.operation_client.CheckOperationStatus(operation_dict, msg_template)", "def getResource(self):\n pass;", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n application_insights_id: Optional[pulumi.Input[str]] = None,\n container_registry_id: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n encryption: Optional[pulumi.Input[pulumi.InputType['WorkspaceEncryptionArgs']]] = None,\n friendly_name: Optional[pulumi.Input[str]] = None,\n high_business_impact: Optional[pulumi.Input[bool]] = None,\n identity: Optional[pulumi.Input[pulumi.InputType['WorkspaceIdentityArgs']]] = None,\n image_build_compute_name: Optional[pulumi.Input[str]] = None,\n key_vault_id: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n primary_user_assigned_identity: Optional[pulumi.Input[str]] = None,\n public_access_behind_virtual_network_enabled: Optional[pulumi.Input[bool]] = None,\n public_network_access_enabled: Optional[pulumi.Input[bool]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n sku_name: Optional[pulumi.Input[str]] = None,\n storage_account_id: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n v1_legacy_mode_enabled: Optional[pulumi.Input[bool]] = None,\n __props__=None):\n ...", "def test_objectresource_countobjects(self):\n\n home01 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=\"user01\", create=True)\n self.assertTrue(home01 is not None)\n calendar01 = yield home01.childWithName(\"calendar\")\n yield calendar01.createCalendarObjectWithName(\"1.ics\", Component.fromString(self.caldata1))\n yield calendar01.createCalendarObjectWithName(\"2.ics\", Component.fromString(self.caldata2))\n yield self.commitTransaction(0)\n\n home = yield self._remoteHome(self.theTransactionUnderTest(1), \"user01\")\n self.assertTrue(home is not None)\n calendar = yield home.childWithName(\"calendar\")\n count = yield calendar.countObjectResources()\n self.assertEqual(count, 2)\n yield self.commitTransaction(1)", "def extract_resources(self, resources, collector, cwd=None):\n raise NotImplementedError(\"not implemented\")", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n kind: Optional[pulumi.Input[Union[str, 'Kind']]] = None,\n location: Optional[pulumi.Input[str]] = None,\n managed_network_group_name: Optional[pulumi.Input[str]] = None,\n managed_network_name: Optional[pulumi.Input[str]] = None,\n management_groups: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceIdArgs']]]]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n subnets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceIdArgs']]]]] = None,\n subscriptions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceIdArgs']]]]] = None,\n virtual_networks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceIdArgs']]]]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n access_configuration_name: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n directory_id: Optional[pulumi.Input[str]] = None,\n force_remove_permission_policies: 
Optional[pulumi.Input[bool]] = None,\n permission_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AccessConfigurationPermissionPolicyArgs']]]]] = None,\n relay_state: Optional[pulumi.Input[str]] = None,\n session_duration: Optional[pulumi.Input[int]] = None,\n __props__=None):\n ...", "def build_resource(self, *args, **kwargs):\r\n r = {}\r\n for current_resource in self.resources:\r\n item = self._get_resource(\r\n repo=self.current_repo, owner=self.owner, \r\n resource=current_resource, **kwargs\r\n )\r\n if not item: continue\r\n r[current_resource] = item\r\n\r\n return r", "def CreateCollectionSample():\n client = CreateClient()\n col = gdata.docs.data.Resource(type='folder', title='My Sample Folder')\n col = client.CreateResource(col)\n print 'Created collection:', col.title.text, col.resource_id.text", "def resource_group(self) -> str:\n return pulumi.get(self, \"resource_group\")", "def __init__(self, owner, resourceFile):\n self.checksum = Path(resourceFile).md5 # Just use the path name as a unique ID\n _Resource.__init__(self, owner, resourceFile)\n if self._idevice:\n self._idevice.userResources.append(self)", "def test_objectresource_objectwith(self):\n\n home01 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=\"user01\", create=True)\n self.assertTrue(home01 is not None)\n calendar01 = yield home01.childWithName(\"calendar\")\n resource01 = yield calendar01.createCalendarObjectWithName(\"1.ics\", Component.fromString(self.caldata1))\n yield calendar01.createCalendarObjectWithName(\"2.ics\", Component.fromString(self.caldata2))\n yield self.commitTransaction(0)\n\n home = yield self._remoteHome(self.theTransactionUnderTest(1), \"user01\")\n self.assertTrue(home is not None)\n calendar = yield home.childWithName(\"calendar\")\n\n resource = yield calendar.objectResourceWithName(\"2.ics\")\n self.assertEqual(resource.name(), \"2.ics\")\n\n resource = yield calendar.objectResourceWithName(\"foo.ics\")\n self.assertEqual(resource, None)\n\n resource = yield calendar.objectResourceWithUID(\"uid1\")\n self.assertEqual(resource.name(), \"1.ics\")\n\n resource = yield calendar.objectResourceWithUID(\"foo\")\n self.assertEqual(resource, None)\n\n resource = yield calendar.objectResourceWithID(resource01.id())\n self.assertEqual(resource.name(), \"1.ics\")\n\n resource = yield calendar.objectResourceWithID(12345)\n self.assertEqual(resource, None)\n\n yield self.commitTransaction(1)", "def validate_resources(self, folder, resources):\r\n self.validate_files_exist(folder, resources)\r\n self.validate_no_duplicate_paths(resources)", "def create_resources(self, pool=True, job=True, storage=True):\n\n if pool:\n self.controller.create_pool(self.info)\n self.logger.info(\"Pool of the mission %s created.\", self.info.name)\n\n if job:\n self.controller.create_job(self.info)\n self.logger.info(\"Job of the mission %s created.\", self.info.name)\n\n if storage:\n self.controller.create_storage_container(self.info)\n self.controller.get_storage_container_access_tokens(self.info)\n self.logger.info(\"Storage of the mission %s created.\", self.info.name)\n\n self.logger.info(\"Resources of the mission %s created.\", self.info.name)", "def install_private_resources(context):\n pass", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n bucket: Optional[pulumi.Input[str]] = None,\n default_acl: Optional[pulumi.Input[str]] = None,\n predefined_acl: Optional[pulumi.Input[str]] = None,\n role_entities: 
Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n __props__=None):\n ...", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n auto_devops_enabled: Optional[pulumi.Input[bool]] = None,\n avatar: Optional[pulumi.Input[str]] = None,\n avatar_hash: Optional[pulumi.Input[str]] = None,\n avatar_url: Optional[pulumi.Input[str]] = None,\n default_branch_protection: Optional[pulumi.Input[int]] = None,\n description: Optional[pulumi.Input[str]] = None,\n emails_disabled: Optional[pulumi.Input[bool]] = None,\n extra_shared_runners_minutes_limit: Optional[pulumi.Input[int]] = None,\n full_name: Optional[pulumi.Input[str]] = None,\n full_path: Optional[pulumi.Input[str]] = None,\n ip_restriction_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n lfs_enabled: Optional[pulumi.Input[bool]] = None,\n membership_lock: Optional[pulumi.Input[bool]] = None,\n mentions_disabled: Optional[pulumi.Input[bool]] = None,\n name: Optional[pulumi.Input[str]] = None,\n parent_id: Optional[pulumi.Input[int]] = None,\n path: Optional[pulumi.Input[str]] = None,\n prevent_forking_outside_group: Optional[pulumi.Input[bool]] = None,\n project_creation_level: Optional[pulumi.Input[str]] = None,\n request_access_enabled: Optional[pulumi.Input[bool]] = None,\n require_two_factor_authentication: Optional[pulumi.Input[bool]] = None,\n runners_token: Optional[pulumi.Input[str]] = None,\n share_with_group_lock: Optional[pulumi.Input[bool]] = None,\n shared_runners_minutes_limit: Optional[pulumi.Input[int]] = None,\n subgroup_creation_level: Optional[pulumi.Input[str]] = None,\n two_factor_grace_period: Optional[pulumi.Input[int]] = None,\n visibility_level: Optional[pulumi.Input[str]] = None,\n web_url: Optional[pulumi.Input[str]] = None) -> 'Group':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _GroupState.__new__(_GroupState)\n\n __props__.__dict__[\"auto_devops_enabled\"] = auto_devops_enabled\n __props__.__dict__[\"avatar\"] = avatar\n __props__.__dict__[\"avatar_hash\"] = avatar_hash\n __props__.__dict__[\"avatar_url\"] = avatar_url\n __props__.__dict__[\"default_branch_protection\"] = default_branch_protection\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"emails_disabled\"] = emails_disabled\n __props__.__dict__[\"extra_shared_runners_minutes_limit\"] = extra_shared_runners_minutes_limit\n __props__.__dict__[\"full_name\"] = full_name\n __props__.__dict__[\"full_path\"] = full_path\n __props__.__dict__[\"ip_restriction_ranges\"] = ip_restriction_ranges\n __props__.__dict__[\"lfs_enabled\"] = lfs_enabled\n __props__.__dict__[\"membership_lock\"] = membership_lock\n __props__.__dict__[\"mentions_disabled\"] = mentions_disabled\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"parent_id\"] = parent_id\n __props__.__dict__[\"path\"] = path\n __props__.__dict__[\"prevent_forking_outside_group\"] = prevent_forking_outside_group\n __props__.__dict__[\"project_creation_level\"] = project_creation_level\n __props__.__dict__[\"request_access_enabled\"] = request_access_enabled\n __props__.__dict__[\"require_two_factor_authentication\"] = require_two_factor_authentication\n __props__.__dict__[\"runners_token\"] = runners_token\n __props__.__dict__[\"share_with_group_lock\"] = share_with_group_lock\n __props__.__dict__[\"shared_runners_minutes_limit\"] = shared_runners_minutes_limit\n __props__.__dict__[\"subgroup_creation_level\"] = subgroup_creation_level\n 
__props__.__dict__[\"two_factor_grace_period\"] = two_factor_grace_period\n __props__.__dict__[\"visibility_level\"] = visibility_level\n __props__.__dict__[\"web_url\"] = web_url\n return Group(resource_name, opts=opts, __props__=__props__)", "def createResourceSims(self):\n if self.game.myEmpire['viewResources'] == 0:\n return\n import anwp.sims\n # remove old sims if any\n self.removeResourceSims()\n # create resource sims\n self.resourceSims = []\n for systemID, systemDict in self.game.allSystems.iteritems():\n if systemDict['myEmpireID'] == self.game.myEmpireID:\n # create resource sims representing resources on system\n i = 0\n for attr in ['AL', 'EC', 'IA']:\n if systemDict[attr] > 0:\n # system produces this resource create sim\n name = string.lower(attr[-2:])\n imageFileName = '%smini_%s.png' % (self.game.app.genImagePath, name)\n \n # create sim\n sim = ResourceEntity(self, anwp.sims.categories.StaticCategory(imageFileName, 'resource'))\n \n # add sim to world\n self.resourceSims.append(sim)\n x = systemDict['x'] - 15\n y = systemDict['y'] - 45 - 20*i\n facing = 0\n speed = 0\n force = 1\n self.world.addToWorld(sim, x, y, facing, speed, force)\n i += 1\n \n # create resource sims representing resources being generated\n i = 0\n for attr in ['prodAL', 'prodEC', 'prodIA', 'prodCR']:\n if systemDict[attr] > 0:\n # system produces this resource create sim\n name = string.lower(attr[-2:])\n imageFileName = '%smini_%s_gen.png' % (self.game.app.genImagePath, name)\n \n # create sim\n sim = ResourceEntity(self, anwp.sims.categories.StaticCategory(imageFileName, 'resource'))\n \n # add sim to world\n self.resourceSims.append(sim)\n x = systemDict['x'] + 15\n y = systemDict['y'] - 45 - 20*i\n facing = 0\n speed = 0\n force = 1\n self.world.addToWorld(sim, x, y, facing, speed, force)\n i += 1", "def test_check_resource(self):\n s1 = System()\n b1 = Books(\"1984\", \"George Orwell\", \"Harvill Secker\", \"1949\", \"0123456789123\")\n self.assertEqual(s1.check_resource(b1), False)\n s1.add_resource(b1)\n self.assertEqual(s1.check_resource(b1), True)", "def test_resource_combinations_rpc(\n self, ns_resource_factory, bucket_factory, platform1, platform2\n ):\n # Create the namespace resources and verify health\n ns_resource_name1 = ns_resource_factory(platform=platform1)[1]\n ns_resource_name2 = ns_resource_factory(platform=platform2)[1]\n\n # Create the namespace bucket on top of the namespace resource\n bucket_factory(\n amount=1,\n interface=\"mcg-namespace\",\n write_ns_resource=ns_resource_name1,\n read_ns_resources=[ns_resource_name1, ns_resource_name2],\n )", "def __init__(__self__,\n resource_name: str,\n args: GroupArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def MakeResource(resource_list, output_list=None):\n content = {'resources': resource_list}\n if output_list:\n content['outputs'] = output_list\n return yaml.dump(content)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n app_id: Optional[pulumi.Input[str]] = None,\n index: Optional[pulumi.Input[str]] = None,\n master: Optional[pulumi.Input[str]] = None,\n pattern: Optional[pulumi.Input[str]] = None,\n permissions: Optional[pulumi.Input[str]] = None,\n required: Optional[pulumi.Input[bool]] = None,\n title: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n user_type: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def resourceid(self):", "def resources(ctx, job, gpu):\n\n def get_experiment_resources():\n try:\n 
message_handler = Printer.gpu_resources if gpu else Printer.resources\n PolyaxonClient().experiment.resources(\n user, project_name, _experiment, message_handler=message_handler)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not get resources for experiment `{}`.'.format(_experiment))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n def get_experiment_job_resources():\n try:\n message_handler = Printer.gpu_resources if gpu else Printer.resources\n PolyaxonClient().experiment_job.resources(user,\n project_name,\n _experiment,\n _job,\n message_handler=message_handler)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not get resources for job `{}`.'.format(_job))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),\n ctx.obj.get('experiment'))\n\n if job:\n _job = get_experiment_job_or_local(job)\n get_experiment_job_resources()\n else:\n get_experiment_resources()", "def test_resource_namespace(self, integrationtest, k8sconfig):\n # Fixtures.\n config = self.k8sconfig(integrationtest, k8sconfig)\n MM = MetaManifest\n\n for src in [\"\", \"v1\"]:\n # A particular Namespace.\n res, err = k8s.resource(config, MM(src, \"Namespace\", None, \"name\"))\n assert not err\n assert res == K8sResource(\n apiVersion=\"v1\",\n kind=\"Namespace\",\n name=\"namespaces\",\n namespaced=False,\n url=f\"{config.url}/api/v1/namespaces/name\",\n )\n\n # A particular Namespace in a particular namespace -> Invalid.\n assert k8s.resource(config, MM(src, \"Namespace\", \"ns\", \"name\")) == (res, err)\n\n # All Namespaces.\n res, err = k8s.resource(config, MM(src, \"Namespace\", None, None))\n assert not err\n assert res == K8sResource(\n apiVersion=\"v1\",\n kind=\"Namespace\",\n name=\"namespaces\",\n namespaced=False,\n url=f\"{config.url}/api/v1/namespaces\",\n )\n\n # Same as above because the \"namespace\" argument is ignored for Namespaces.\n assert k8s.resource(config, MM(src, \"Namespace\", \"name\", \"\")) == (res, err)" ]
[ "0.6505439", "0.5749894", "0.57462305", "0.5733718", "0.5730093", "0.565757", "0.5604151", "0.5550088", "0.5544834", "0.54473704", "0.54402137", "0.54314804", "0.5424065", "0.5399088", "0.5390702", "0.5361906", "0.53527755", "0.5319915", "0.5289782", "0.52817404", "0.5274568", "0.5268837", "0.52671564", "0.5242845", "0.5234903", "0.5232816", "0.522204", "0.5212945", "0.52094436", "0.52061236", "0.5204312", "0.5196023", "0.51803505", "0.5172443", "0.5169334", "0.5160144", "0.51509315", "0.5144639", "0.5124473", "0.5121889", "0.5115947", "0.51155245", "0.5114491", "0.5101776", "0.5098574", "0.5090989", "0.5086015", "0.50681585", "0.5066008", "0.5063273", "0.50502425", "0.5047447", "0.50464386", "0.5041914", "0.50398535", "0.5034393", "0.5030239", "0.5016329", "0.5005658", "0.50022745", "0.49998307", "0.49938563", "0.49929053", "0.49899298", "0.49566424", "0.49428436", "0.4927181", "0.49230433", "0.49228016", "0.49219507", "0.4918144", "0.4918144", "0.4918144", "0.4918144", "0.49175933", "0.49166116", "0.49010724", "0.4898463", "0.48981318", "0.48959774", "0.4889538", "0.4889087", "0.48884854", "0.48846203", "0.48839945", "0.48829386", "0.48799926", "0.48786178", "0.48772627", "0.48768553", "0.4870888", "0.486936", "0.4868006", "0.48675695", "0.48645464", "0.4855191", "0.4851398", "0.48497176", "0.48480508", "0.484711", "0.48432744" ]
0.0
-1
Resource Management allows you to build an organizational structure for resources based on your business requirements. You can use resource directories, folders, accounts, and resource groups to hierarchically organize and manage resources. For more information, see [What is Resource Management?](~~94475~~)
async def modify_resource_group_with_options_async( self, request: dds_20151201_models.ModifyResourceGroupRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.ModifyResourceGroupResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.region_id): query['RegionId'] = request.region_id if not UtilClient.is_unset(request.resource_group_id): query['ResourceGroupId'] = request.resource_group_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='ModifyResourceGroup', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.ModifyResourceGroupResponse(), await self.call_api_async(params, req, runtime) )
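For context, the document above is the generated async SDK wrapper for ModifyResourceGroup. Below is a minimal usage sketch of how such a wrapper is typically driven; the package path, Client class, endpoint, and all IDs are assumptions or placeholders based on the common Alibaba Cloud Python SDK layout, not values taken from this dataset.

import asyncio

# Assumed packages for the DDS (MongoDB) 2015-12-01 SDK; adjust to your installed distribution.
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util import models as util_models
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_dds20151201.client import Client


async def move_instance_to_resource_group() -> None:
    # Credentials and endpoint are placeholders; supply real values in practice.
    config = open_api_models.Config(
        access_key_id='<your-access-key-id>',
        access_key_secret='<your-access-key-secret>',
        endpoint='mongodb.aliyuncs.com',
    )
    client = Client(config)

    # Field names mirror the query parameters assembled inside the wrapper above.
    request = dds_20151201_models.ModifyResourceGroupRequest(
        dbinstance_id='dds-bp1xxxxxxxxxxxxx',      # placeholder instance ID
        resource_group_id='rg-aekzxxxxxxxxxxx',    # placeholder target resource group ID
        region_id='cn-hangzhou',
    )
    runtime = util_models.RuntimeOptions()
    response = await client.modify_resource_group_with_options_async(request, runtime)
    print(response.body)


if __name__ == '__main__':
    asyncio.run(move_instance_to_resource_group())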
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resources(self):", "def gen_resources(self):\n\n print \"\\t* Adding resources to compute template\"\n\n # add all the nets and subnets\n self.gen_net_resources()\n\n # add all routers\n self.gen_router_resources()\n\n # add all servers/intances\n self.gen_server_resources()", "def resources():\n check_resources()", "def assemble_resource_directories(project, base_dir):\n resource_path = os.path.join(base_dir, project.resources_path)\n os.makedirs(os.path.join(resource_path, 'images'))\n os.makedirs(os.path.join(resource_path, 'fonts'))\n os.makedirs(os.path.join(resource_path, 'data'))", "def resource_prefix(self):", "def resource_mapping():\n return {\n 'OS::Heat::ResourceChain': ResourceChain,\n }", "def resources(self):\n return self.__resources", "def resource_map(self):", "def test_create_namespaced_local_resource_access_review(self):\n pass", "def test_create_namespaced_resource_access_review(self):\n pass", "def ResourcePath(self, name):\n pass", "def _categorize_resource(self, resource: Resource, required_permissions: str) -> None:\n if resource.is_user_provided:\n self.resources_reused.append({\"arn\": resource.arn, \"required_permissions\": required_permissions})\n else:\n self.resources_created.append({\"arn\": resource.arn})", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n auto_devops_enabled: Optional[pulumi.Input[bool]] = None,\n avatar: Optional[pulumi.Input[str]] = None,\n avatar_hash: Optional[pulumi.Input[str]] = None,\n default_branch_protection: Optional[pulumi.Input[int]] = None,\n description: Optional[pulumi.Input[str]] = None,\n emails_disabled: Optional[pulumi.Input[bool]] = None,\n extra_shared_runners_minutes_limit: Optional[pulumi.Input[int]] = None,\n ip_restriction_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n lfs_enabled: Optional[pulumi.Input[bool]] = None,\n membership_lock: Optional[pulumi.Input[bool]] = None,\n mentions_disabled: Optional[pulumi.Input[bool]] = None,\n name: Optional[pulumi.Input[str]] = None,\n parent_id: Optional[pulumi.Input[int]] = None,\n path: Optional[pulumi.Input[str]] = None,\n prevent_forking_outside_group: Optional[pulumi.Input[bool]] = None,\n project_creation_level: Optional[pulumi.Input[str]] = None,\n request_access_enabled: Optional[pulumi.Input[bool]] = None,\n require_two_factor_authentication: Optional[pulumi.Input[bool]] = None,\n share_with_group_lock: Optional[pulumi.Input[bool]] = None,\n shared_runners_minutes_limit: Optional[pulumi.Input[int]] = None,\n subgroup_creation_level: Optional[pulumi.Input[str]] = None,\n two_factor_grace_period: Optional[pulumi.Input[int]] = None,\n visibility_level: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def resource_manager():\n return visa.ResourceManager()", "def testSubResources(self):\n\n def CountResourceTree(resource):\n ret = 0\n for r in resource._resources:\n ret += 1 + CountResourceTree(r)\n return ret\n\n api = self.ApiFromDiscoveryDoc('moderator.v1.json')\n top_level_resources = 0\n total_resources = 0\n non_method_resources = 0\n have_sub_resources = 0\n have_sub_resources_and_methods = 0\n for r in api._resources:\n top_level_resources += 1\n total_resources += 1 + CountResourceTree(r)\n if not r._methods:\n non_method_resources += 1\n if r._resources:\n have_sub_resources += 1\n if r._resources and r._methods:\n have_sub_resources_and_methods += 1\n # Hand counted 18 resources in the file.\n self.assertEquals(18, total_resources)\n self.assertEquals(11, 
top_level_resources)\n # 4 of them have no methods, only sub resources\n self.assertEquals(4, non_method_resources)\n # 6 of them have sub resources.\n self.assertEquals(6, have_sub_resources)\n # And, of course, 2 should have both sub resources and methods\n self.assertEquals(2, have_sub_resources_and_methods)", "def test_create_local_resource_access_review_for_all_namespaces(self):\n pass", "def GetResourceAclSample():\n client = CreateClient()\n for resource in client.GetResources(limit=5).entry:\n acl_feed = client.GetResourceAcl(resource)\n for acl in acl_feed.entry:\n print acl.role.value, acl.scope.type, acl.scope.value", "def test_objectresource_resourcenameforuid(self):\n\n home01 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=\"user01\", create=True)\n self.assertTrue(home01 is not None)\n calendar01 = yield home01.childWithName(\"calendar\")\n yield calendar01.createCalendarObjectWithName(\"1.ics\", Component.fromString(self.caldata1))\n yield calendar01.createCalendarObjectWithName(\"2.ics\", Component.fromString(self.caldata2))\n yield self.commitTransaction(0)\n\n home = yield self._remoteHome(self.theTransactionUnderTest(1), \"user01\")\n self.assertTrue(home is not None)\n calendar = yield home.childWithName(\"calendar\")\n\n name = yield calendar.resourceNameForUID(\"uid1\")\n self.assertEqual(name, \"1.ics\")\n\n name = yield calendar.resourceNameForUID(\"uid2\")\n self.assertEqual(name, \"2.ics\")\n\n name = yield calendar.resourceNameForUID(\"foo\")\n self.assertEqual(name, None)\n\n yield self.commitTransaction(1)", "def test_create_resource_group(self):\n pass", "def create_resource_object():\n\n # Create two objects of different users and same center code\n Resource.objects.create(status=0, title='Recurso de teste (BR1.1)', \n link='http://bvsalud.org', originator='BIREME',\n created_by_id=1, cooperative_center_code='BR1.1')\n \n Resource.objects.create(status=0, title='Recurso de teste (BR1.1)', \n link='http://bvsalud.org', originator='BIREME',\n created_by_id=2, cooperative_center_code='BR1.1')\n\n # Create one object of diffent center code\n Resource.objects.create(status=0, title='Recurso de teste (PY3.1)', \n link='http://bvsalud.org', originator='BIREME',\n created_by_id=3, cooperative_center_code='PY3.1')\n\n\n # add descriptor and thematic area for resource pk 1\n object_ct = ContentType.objects.get_for_model(Resource)\n descriptor = Descriptor.objects.create(object_id=1, content_type=object_ct, text='descritor 1')\n keyword = Keyword.objects.create(object_id=1, content_type=object_ct, text='keyword 1')\n thematic = ResourceThematic.objects.create(object_id=1, content_type=object_ct, thematic_area_id=1)", "def getResources(self, folder):\n\n #-------------------- \n # Get the resource JSON\n #-------------------- \n folder += \"/resources\"\n resources = self.__getJson(folder)\n #print(\"%s %s\"%(, folder))\n #print(\" Got resources: '%s'\"%(str(resources)))\n\n\n\n #-------------------- \n # Filter the JSONs\n #-------------------- \n resourceNames = []\n for r in resources:\n if 'label' in r:\n resourceNames.append(r['label'])\n #print(\"FOUND RESOURCE ('%s') : %s\"%(folder, r['label']))\n elif 'Name' in r:\n resourceNames.append(r['Name'])\n #print(\"FOUND RESOURCE ('%s') : %s\"%(folder, r['Name']))\n\n return resourceNames", "def resources(request):\n projects, secrets, pools, storageclasses, pvcs, pods = ([] for i in range(6))\n\n def finalizer():\n \"\"\"\n Delete the resources created during the test\n \"\"\"\n for resource_type 
in pods, pvcs, storageclasses, secrets:\n for resource in resource_type:\n resource.delete()\n resource.ocp.wait_for_delete(resource.name)\n if pools:\n # Delete only the RBD pool\n pools[0].delete()\n if projects:\n for project in projects:\n project.delete(resource_name=project.namespace)\n project.wait_for_delete(project.namespace)\n\n request.addfinalizer(finalizer)\n\n return projects, secrets, pools, storageclasses, pvcs, pods", "def init_physical_resources():\n test_physical_resources = []\n\n # add info to list in memory, one by one, following signature values\n phys_resrc_ID = 1\n phys_resrc_name = \"small-cavium-1\"\n phys_resrc_info = \"Jump server in Arm pod, 48 cores, 64G RAM, 447G SSD, aarch64 Cavium ThunderX, Ubuntu OS\"\n phys_resrc_IPAddress = \"10.10.50.12\"\n phys_resrc_MACAddress = \"00-14-22-01-23-45\"\n\n test_physical_resources.append(PhysicalResource(phys_resrc_ID, phys_resrc_name,\n phys_resrc_info,\n phys_resrc_IPAddress,\n phys_resrc_MACAddress))\n\n phys_resrc_ID = 2\n phys_resrc_name = \"medium-cavium-1\"\n phys_resrc_info = \"Jump server in New York pod, 96 cores, 64G RAM, 447G SSD, aarch64 Cavium ThunderX, Ubuntu OS\"\n phys_resrc_IPAddress = \"30.31.32.33\"\n phys_resrc_MACAddress = \"0xb3:22:05:c1:aa:82\"\n\n test_physical_resources.append(PhysicalResource(phys_resrc_ID, phys_resrc_name,\n phys_resrc_info,\n phys_resrc_IPAddress,\n phys_resrc_MACAddress))\n\n phys_resrc_ID = 3\n phys_resrc_name = \"mega-cavium-666\"\n phys_resrc_info = \"Jump server in Las Vegas, 1024 cores, 1024G RAM, 6666G SSD, aarch64 Cavium ThunderX, Ubuntu OS\"\n phys_resrc_IPAddress = \"54.53.52.51\"\n phys_resrc_MACAddress = \"01-23-45-67-89-ab\"\n\n test_physical_resources.append(PhysicalResource(phys_resrc_ID, phys_resrc_name,\n phys_resrc_info,\n phys_resrc_IPAddress,\n phys_resrc_MACAddress))\n\n\n # write list to binary file\n write_list_bin(test_physical_resources, FILE_PHYSICAL_RESOURCES)\n\n return test_physical_resources", "def CreateResourceInCollectionSample():\n client = CreateClient()\n col = gdata.docs.data.Resource(type='folder', title='My Sample Folder')\n col = client.CreateResource(col)\n print 'Created collection:', col.title.text, col.resource_id.text\n doc = gdata.docs.data.Resource(type='document', title='My Sample Doc')\n doc = client.CreateResource(doc, collection=col)\n print 'Created:', doc.title.text, doc.resource_id.text", "def test_getResourceRelations(self):\n pass", "def collect_resources(namespace, output_dir, api_resources, k8s_cli_input=\"\", selector=\"\"):\n set_file_logger(output_dir)\n k8s_cli = detect_k8s_cli(k8s_cli_input)\n ns_output_dir = os.path.join(output_dir, namespace)\n make_dir(ns_output_dir)\n collect_api_resources(namespace, ns_output_dir, k8s_cli, api_resources, selector)\n collect_api_resources_description(namespace, ns_output_dir, k8s_cli, api_resources, selector)\n collect_pods_logs(namespace, ns_output_dir, k8s_cli, logs_from_all_pods=True)", "def __init__(__self__, resource_name, opts=None, attributes=None, name=None, parent_id=None, realm_id=None, __props__=None, __name__=None, __opts__=None):\n if __name__ is not None:\n warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning)\n resource_name = __name__\n if __opts__ is not None:\n warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning)\n opts = __opts__\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a 
ResourceOptions instance')\n if opts.version is None:\n opts.version = utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = dict()\n\n __props__['attributes'] = attributes\n __props__['name'] = name\n __props__['parent_id'] = parent_id\n if realm_id is None:\n raise TypeError(\"Missing required property 'realm_id'\")\n __props__['realm_id'] = realm_id\n __props__['path'] = None\n super(Group, __self__).__init__(\n 'keycloak:index/group:Group',\n resource_name,\n __props__,\n opts)", "def create_resources(self) -> List[ResourceDescription]:\r\n return self.resources", "def test_create_resource_access_review_for_all_namespaces(self):\n pass", "def resource(self, *path):\n # TODO(vadimsh): Verify that file exists. Including a case like:\n # module.resource('dir').join('subdir', 'file.py')\n return self._module.RESOURCE_DIRECTORY.join(*path)", "def create_sagemaker_resource(\n resource_plural, resource_name, spec_file, replacements, namespace=\"default\"\n):\n\n reference, spec, resource = k8s.load_and_create_resource(\n resource_directory,\n CRD_GROUP,\n CRD_VERSION,\n resource_plural,\n resource_name,\n spec_file,\n replacements,\n namespace,\n )\n\n return reference, spec, resource", "def __init__(self, basedir=None):\n # ------------------------------------------------------------------------\n super(Resources, self).__init__()\n self.xInitialize(basedir or \"resources\")", "def generate_config(context):\n\n properties = context.properties\n\n base_resource = get_type(context)\n\n resources = []\n\n if 'dependsOn' in properties:\n dependson = {'metadata': {'dependsOn': properties['dependsOn']}}\n dependson_root = properties['dependsOn']\n else:\n dependson = {}\n dependson_root = []\n\n for role in properties['roles']:\n for member in role['members']:\n suffix = sha1(\n '{}-{}'.format(role['role'], member).encode('utf-8')).hexdigest()[:10]\n policy_get_name = '{}-{}'.format(context.env['name'], suffix)\n\n resource_name = '{}-{}'.format(policy_get_name,\n base_resource['postfix'])\n iam_resource = {\n 'name': resource_name,\n # TODO - Virtual type documentation needed\n 'type': base_resource['dm_type'],\n 'properties': {\n base_resource['dm_resource_property']: base_resource['id'],\n 'role': role['role'],\n 'member': member,\n }\n }\n iam_resource.update(dependson)\n resources.append(iam_resource)\n\n dependson = {'metadata': {'dependsOn': [\n resource_name] + dependson_root}}\n\n return {\"resources\": resources}", "def resources(stack, region, profile):\n logging.debug(f'finding resources - stack: {stack}')\n logging.debug(f'region: {region}')\n logging.debug(f'profile: {profile}')\n tool = ResourceTool(\n Stack=stack,\n Region=region,\n Profile=profile,\n Verbose=True\n )\n\n if tool.list_resources():\n sys.exit(0)\n else:\n sys.exit(1)", "def subdir(self):", "def get_recipe_resource():\n return os.getenv(\"DKU_CUSTOM_RESOURCE_FOLDER\")", "def get_resource_path():\n return os.path.join(os.path.dirname(__file__), \"resources\") + os.path.sep", "def resource_path(name):\n return os.path.join(\n os.path.dirname(__file__), 'images', 'resource', name)", "def __init__(__self__,\n resource_name: str,\n args: Optional[AclArgs] = None,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def link_resources(ctx):\n\n for resource in RESOURCES:\n\n command = \"ln -s -r -f -T {res}/{resource} {proj}/{resource}\".format(\n 
res=RESOURCE_DIR,\n proj=PROJECT_DIR,\n resource=resource)\n\n print(\"Running\")\n print(command)\n print(\"-----------------------------\")\n ctx.run(command)", "def format_resource_tree(\n nested_resources, # type: NestedResourceNodes\n db_session, # type: Session\n resources_perms_dict=None, # type: Optional[ResourcePermissionMap]\n permission_type=None, # type: Optional[PermissionType]\n nesting_key=\"children\", # type: NestingKeyType\n): # type: (...) -> JSON\n # optimization to avoid re-lookup of 'allowed permissions' when already fetched\n # unused when parsing 'applied permissions'\n __internal_svc_res_perm_dict = {}\n\n def recursive_fmt_res_tree(nested_dict): # type: (NestedResourceNodes) -> JSON\n fmt_res_tree = {}\n for child_id, child_dict in nested_dict.items():\n resource = child_dict[\"node\"]\n # nested nodes always use 'children' regardless of nested-key\n # nested-key employed in the generated format will indicate the real resource parents/children relationship\n new_nested = child_dict[\"children\"]\n perms = []\n\n # case of pre-specified user/group-specific permissions\n if resources_perms_dict is not None:\n if resource.resource_id in resources_perms_dict.keys():\n perms = resources_perms_dict[resource.resource_id]\n\n # case of full fetch (allowed resource permissions)\n else:\n # directly access the resource if it is a service\n service = None # type: Optional[Service]\n if resource.root_service_id is None:\n service = resource\n service_id = resource.resource_id\n # obtain corresponding top-level service resource if not already available,\n # get resource permissions allowed under the top service's scope\n else:\n service_id = resource.root_service_id\n if service_id not in __internal_svc_res_perm_dict:\n service = ResourceService.by_resource_id(service_id, db_session=db_session)\n # add to dict only if not already added\n if service is not None and service_id not in __internal_svc_res_perm_dict:\n __internal_svc_res_perm_dict[service_id] = {\n res_type.resource_type_name: res_perms # use str key to match below 'resource_type' field\n for res_type, res_perms in SERVICE_TYPE_DICT[service.type].resource_types_permissions.items()\n }\n # in case of inverse nesting, service could be at \"bottom\"\n # retrieve its permissions directly since its type is never expected nested under itself\n res_type_name = resource.resource_type # type: Str\n if res_type_name == \"service\":\n perms = SERVICE_TYPE_DICT[service.type].permissions\n else:\n perms = __internal_svc_res_perm_dict[service_id][resource.resource_type]\n\n fmt_res_tree[child_id] = format_resource(resource, perms, permission_type)\n fmt_res_tree[child_id][nesting_key] = recursive_fmt_res_tree(new_nested)\n return fmt_res_tree\n\n return recursive_fmt_res_tree(nested_resources)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n account_id: Optional[pulumi.Input[str]] = None,\n group: Optional[pulumi.Input[str]] = None,\n image_id: Optional[pulumi.Input[str]] = None,\n organization_arn: Optional[pulumi.Input[str]] = None,\n organizational_unit_arn: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def assemble_resources(base_dir, resource_path, resources, type_restrictions=None):\n for f in resources:\n if type_restrictions and f.kind not in type_restrictions:\n continue\n target_dir = os.path.abspath(os.path.join(base_dir, resource_path, ResourceFile.DIR_MAP[f.kind]))\n f.copy_all_variants_to_dir(target_dir)", "def 
create_resource_config_files(host_config, resource_config, type_map, bus_map, trecs_root_dir, output_dir, resource_config_dir, model_listen_port, agent_listen_port):\n for host in host_config:\n if host['host_type'] != 'RA':\n continue\n\n resource_name = host['attached_resource_name']\n\n init_data = {\n 'RA': {\n 'ip': '127.0.0.1',\n 'listen_port': agent_listen_port\n },\n 'bus_index': bus_map[resource_name],\n 'listen_port': model_listen_port,\n 'log_path': path.join(output_dir, 'csv', '{}.csv'.format(resource_name))\n }\n\n resource = next(resource for resource in resource_config['resources'] if resource['resource_name'] == resource_name)\n for key in resource.keys():\n if key.endswith('_path'):\n cwd = getcwd()\n chdir(resource_config_dir)\n resource[key] = path.abspath(resource[key])\n chdir(cwd)\n\n final_config = init_data.copy()\n final_config.update(resource)\n\n config_file_name = '{}_config.json'.format(resource_name)\n with open(\n path.join(trecs_root_dir, 'run', config_file_name), 'w'\n ) as init_file:\n dump(final_config, init_file)", "def test_resource_path(self):\n\n # Without arguments\n resources_root_path = os.path.abspath(os.path.join(\n MY_DIRECTORY, '..', '..', 'resources'\n ))\n self.assertEqual(resources_root_path, paths.resource())", "def resources(self, resources):\n self._resources = resources", "def load(self):\n self.suite.load()\n self.resource_map = {}\n dirlist = os.listdir(self.resources)\n for resource_name in (name for name in dirlist\n if os.path.isfile(os.path.join(self.resources,name)) and\n os.path.splitext(name)[1].lower() == '.fbr'):\n try:\n f = open(os.path.join(self.resources,resource_name),'rU')\n expr = f.read()\n d = eval(expr)\n resource_id = os.path.splitext(resource_name)[0].lower()\n d['id'] = resource_id\n kind = d['kind']\n del d['kind']\n self.resource_map[resource_id] = Resource.create(kind,**d)\n finally:\n f.close()", "def get_resource_dir(cls) -> str:\n return os.path.join(\n os.path.realpath(os.path.dirname(__file__)),\n os.pardir,\n os.pardir,\n os.pardir,\n \"gem5\",\n \"resources\",\n )", "def test_get_deployment_resources(self):\n pass", "def is_reserved_resource(self, work_dir: str, resource: str) -> bool:\n resource_dir = resource.split(\"/\")[0] if \"/\" in resource else resource\n if resource.startswith(\".resumables-\") and resource.endswith(\".db\"):\n logging.error(f\"resumable dbs not accessible {resource}\")\n return True\n elif re.match(r\"(.+)\\.([a-f\\d0-9-]{32,36})$\", resource):\n logging.error(\"merged resumable files not accessible\")\n return True\n elif re.match(r\"(.+).([a-f\\d0-9-]{32,36}).part$\", resource):\n logging.error(\"partial upload files not accessible\")\n return True\n elif VALID_UUID.match(resource_dir):\n potential_target = os.path.normpath(f\"{work_dir}/{resource_dir}\")\n if os.path.lexists(potential_target) and os.path.isdir(potential_target):\n content = os.listdir(potential_target)\n for entry in content:\n if re.match(r\"(.+).chunk.[0-9]+$\", entry):\n logging.error(f\"resumable directories not accessible {entry}\")\n return True\n return False", "def resource(self, n):\n\n cfg = self.read()\n\n for res in cfg.get('Resources', []):\n res_name = res.get('Resource')\n\n if res_name == n:\n return ConfigResource(res)", "def resources(self):\n return [self]", "def test_access_resource(self):\n test_resource = ResourceTypeName.get()\n role_name = 'test_role'\n resp = self.app.post(f'/v1/resource/{test_resource}', data=json.dumps({'actions': ['tr:action1']}),\n headers=admin_headers)\n 
self.assertEqual(resp.status_code, 201)\n with self.subTest(\"Permission is denied\"):\n resp = self.app.get(f'/v1/resource/{test_resource}', headers=user_header)\n self.assertEqual(resp.status_code, 403)\n\n role_request_body = {\n \"role_id\": role_name,\n \"policy\": {\n 'Statement': [{\n 'Sid': role_name,\n 'Action': [\n \"fus:DeleteResources\",\n \"fus:GetResources\"],\n 'Effect': 'Allow',\n 'Resource': [f\"arn:hca:fus:*:*:resource/{test_resource}\"]\n }]\n }\n }\n resp = self.app.post(f'/v1/role', data=json.dumps(role_request_body), headers=admin_headers)\n self.assertEqual(resp.status_code, 201)\n resp = self.app.put(f\"/v1/user/{service_accounts['user']['client_email']}/roles?action=add\",\n data=json.dumps({'roles': [role_name]}),\n headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n\n with self.subTest(\"Permission is granted\"):\n resp = self.app.get(f'/v1/resource/{test_resource}', headers=user_header)\n self.assertEqual(resp.status_code, 200)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n label: Optional[pulumi.Input[str]] = None,\n permissions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n __props__=None):\n ...", "def register_resources(self):\n raise NotImplementedError", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n domain_id: Optional[pulumi.Input[str]] = None,\n group_id: Optional[pulumi.Input[str]] = None,\n project_id: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n role_id: Optional[pulumi.Input[str]] = None,\n user_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def register_dcc_resource_path(resources_path):\n\n pass", "def set_resources():\n global available_resources\n global EdgenodeResources\n recv_json = request.get_json()\n for resourcename, value in recv_json.items():\n available_resources[resourcename] = value\n # TODO make this better\n EdgenodeResources = [TaskResources(ram=int(available_resources['RAM']), cpu=int(\n available_resources['CPU']), hdd=int(available_resources['HDD'])), available_resources['DATA']]\n\n print 'Available resources set to', EdgenodeResources\n return 'Available resources set to ' + str(available_resources)", "def resourceManager(*args, nameFilter: AnyStr=\"\", saveAs: List[AnyStr, AnyStr]=None,\n **kwargs)->None:\n pass", "def resources(filename):\n return send_from_directory(\"resources\", filename)", "def GetAllResourcesSample():\n client = CreateClient()\n # Unlike client.GetResources, this returns a list of resources\n for resource in client.GetAllResources():\n PrintResource(resource)", "def get_recipe_resource():\n return os.getenv(\"SKU_CUSTOM_RECIPE_RESOURCE_FOLDER\")", "def add_resources(event):\n anuket_resources.need()", "def resources(self) -> HTMLBody:\n\t\treturn render_template(\"resources.jinja2\")", "def get_resources(self, resource_data=None):\n if not resource_data and self.component:\n resource_data = self.component.get_resource_data()\n\n resources = []\n for resource in self.files:\n resource.update(resource_data)\n\n resource['storage_path'] = self.prefix + '/' + resource['name']\n relative_path = self.relative_path(data=resource)\n resource['relative_path'] = relative_path\n resource['url'] = resource['url'] + '/' + relative_path\n resources.append(resource)\n return resources", "def test_create_cluster_resource_quota(self):\n pass", "def __init__(__self__,\n 
resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n etag: Optional[pulumi.Input[str]] = None,\n file_shares: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FileShareConfigArgs']]]]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n kms_key_name: Optional[pulumi.Input[str]] = None,\n labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n location: Optional[pulumi.Input[str]] = None,\n networks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NetworkConfigArgs']]]]] = None,\n project: Optional[pulumi.Input[str]] = None,\n tier: Optional[pulumi.Input['InstanceTier']] = None,\n __props__=None):\n ...", "def gen_router_resources(self):\n\n print \"\\t* Adding router resources to compute template\"\n\n from nova import version\n year = version.version_string()\n\n for idx, router in enumerate(self.tenant_routers):\n router_ports = []\n for port in self.all_ports:\n if router[\"id\"] == port[\"device_id\"]:\n router_ports.append(port)\n\n # add the router definition\n if \"2013\" in year:\n # Havana Format\n data = {\"type\": \"OS::Neutron::Router\"}\n self.compute_data[\"resources\"][\"router%s\" % str(idx)] = data\n\n # routers without external gateway\n if router[\"external_gateway_info\"] is not None:\n\n name = {\"get_resource\": \"router%s\" % str(idx)}\n netid = {\"get_param\": \"public_net_%s\" % str(idx)}\n\n # add the router gateway\n data = {\"type\": \"OS::Neutron::RouterGateway\",\n \"properties\": {\n \"router_id\": name,\n \"network_id\": netid\n }}\n\n self.compute_data[\"resources\"][\"router_gateway%s\" % str(idx)] = data\n\n else:\n # Icehouse Format\n rtrName = router[\"name\"]\n # routers without external gateway\n if router[\"external_gateway_info\"] is not None:\n data = {\"type\": \"OS::Neutron::Router\",\n \"properties\": {\n \"name\": rtrName,\n \"external_gateway_info\": {\n \"network\": {\n \"get_param\": \"public_net_%s\" % str(idx)\n }\n }\n }}\n else:\n data = {\"type\": \"OS::Neutron::Router\",\n \"properties\": {\n \"name\": rtrName\n }\n }\n self.compute_data[\"resources\"][\"router%s\" % str(idx)] = data\n\n # internal port information needed\n internal_interfaces = filter(lambda port: port[\"device_owner\"] == \"network:router_interface\", router_ports)\n\n for idxs, interface in enumerate(internal_interfaces):\n # add the router interface\n\n for fixedip in interface[\"fixed_ips\"]:\n\n # create router interface\n data = {\"type\": \"OS::Neutron::RouterInterface\",\n \"properties\": {\n \"router_id\": {\"get_resource\": \"router%s\" % str(idx)},\n \"port_id\": {\"get_resource\": \"port_%s_%s\" % (str(idx), str(idxs))}\n }}\n self.compute_data[\"resources\"][\"router_interface%s_%s\" % (str(idx), str(idxs))] = data\n\n # create router port\n network = self.neutronclient.show_subnet(fixedip[\"subnet_id\"])[\"subnet\"][\"network_id\"]\n net_name = \"%s\" % str(self.neutronclient.show_network(network)[\"network\"][\"name\"])\n net_id = self.neutronclient.show_network(network)[\"network\"][\"id\"]\n\n fixed_ips = [{\"ip_address\": fixedip[\"ip_address\"]}]\n net = self.neutronclient.show_network(network)[\"network\"]\n if net[\"shared\"] is True:\n data = {\"type\": \"OS::Neutron::Port\",\n \"properties\": {\n \"fixed_ips\": fixed_ips,\n \"network_id\": net_id\n }}\n else:\n data = {\"type\": \"OS::Neutron::Port\",\n \"properties\": {\n \"fixed_ips\": fixed_ips,\n \"network_id\": {\"get_resource\": net_name}\n }}\n 
self.compute_data[\"resources\"][\"port_%s_%s\" % (str(idx), str(idxs))] = data", "def test_sm_resource_object(self):\n\n # Add a faked storage_group\n faked_storage_group = self.add_storage_group1()\n storage_group_oid = faked_storage_group.oid\n\n storage_group_mgr = self.console.storage_groups\n\n # Execute the code to be tested\n storage_group = storage_group_mgr.resource_object(storage_group_oid)\n\n storage_group_uri = \"/api/storage-groups/\" + storage_group_oid\n\n sv_mgr = storage_group.storage_volumes\n vsr_mgr = storage_group.virtual_storage_resources\n\n assert isinstance(storage_group, StorageGroup)\n assert isinstance(sv_mgr, StorageVolumeManager)\n assert isinstance(vsr_mgr, VirtualStorageResourceManager)\n\n sg_cpc = storage_group.cpc\n assert isinstance(sg_cpc, Cpc)\n assert sg_cpc.uri == storage_group.properties['cpc-uri']\n\n # Note: Properties inherited from BaseResource are tested there,\n # but we test them again:\n assert storage_group.properties['object-uri'] == storage_group_uri\n assert storage_group.properties['object-id'] == storage_group_oid\n assert storage_group.properties['class'] == 'storage-group'\n assert storage_group.properties['parent'] == self.console.uri", "def set_paths(self, specs, resources):\n self.install = 'install.xml'\n self.specs_path = path_format(specs)\n self.root = path_format(dirname(dirname(self.specs_path)) + '/')\n self.res_path = path_format(resources)\n self.resources['BASE'] = self.res_path\n self.specs['BASE'] = self.specs_path", "def write_resources(self, resources):\n for filename, data in list(resources.get('outputs', {}).items()):\n # Determine where to write the file to\n dest = os.path.join(self.output_dir, filename)\n path = os.path.dirname(dest)\n if path and not os.path.isdir(path):\n os.makedirs(path)\n\n # Write file\n with open(dest, 'wb') as f:\n f.write(data)", "def resources(self, resources):\n\n self._resources = resources", "def resources(self, resources):\n\n self._resources = resources", "def resources(self, resources):\n\n self._resources = resources", "def resources(self, resources):\n\n self._resources = resources", "def CreateResources(self, manifests, region):\n resource_dict = manifest_util.ParseDeployConfig(self.messages, manifests,\n region)\n msg_template = 'Created Cloud Deploy resource: {}.'\n # Create delivery pipeline first.\n # In case user has both types of pipeline definition in the same\n # config file.\n pipelines = resource_dict[manifest_util.DELIVERY_PIPELINE_KIND_V1BETA1]\n if pipelines:\n operation_dict = {}\n for resource in pipelines:\n operation_dict[resource.name] = self.CreateDeliveryPipeline(resource)\n self.operation_client.CheckOperationStatus(operation_dict, msg_template)\n # In case user has both types of target definition in the same\n # config file.\n targets = resource_dict[manifest_util.TARGET_KIND_V1BETA1]\n if targets:\n operation_dict = {}\n for resource in targets:\n operation_dict[resource.name] = target_util.PatchTarget(resource)\n self.operation_client.CheckOperationStatus(operation_dict, msg_template)\n # Create automation resource.\n automations = resource_dict[manifest_util.AUTOMATION_KIND]\n operation_dict = {}\n for resource in automations:\n operation_dict[resource.name] = automation_util.PatchAutomation(resource)\n self.operation_client.CheckOperationStatus(operation_dict, msg_template)\n # Create custom target type resource.\n custom_target_types = resource_dict[manifest_util.CUSTOM_TARGET_TYPE_KIND]\n operation_dict = {}\n for resource in 
custom_target_types:\n operation_dict[resource.name] = (\n custom_target_type_util.PatchCustomTargetType(resource)\n )\n self.operation_client.CheckOperationStatus(operation_dict, msg_template)", "def getResource(self):\n pass;", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n application_insights_id: Optional[pulumi.Input[str]] = None,\n container_registry_id: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n encryption: Optional[pulumi.Input[pulumi.InputType['WorkspaceEncryptionArgs']]] = None,\n friendly_name: Optional[pulumi.Input[str]] = None,\n high_business_impact: Optional[pulumi.Input[bool]] = None,\n identity: Optional[pulumi.Input[pulumi.InputType['WorkspaceIdentityArgs']]] = None,\n image_build_compute_name: Optional[pulumi.Input[str]] = None,\n key_vault_id: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n primary_user_assigned_identity: Optional[pulumi.Input[str]] = None,\n public_access_behind_virtual_network_enabled: Optional[pulumi.Input[bool]] = None,\n public_network_access_enabled: Optional[pulumi.Input[bool]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n sku_name: Optional[pulumi.Input[str]] = None,\n storage_account_id: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n v1_legacy_mode_enabled: Optional[pulumi.Input[bool]] = None,\n __props__=None):\n ...", "def test_objectresource_countobjects(self):\n\n home01 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=\"user01\", create=True)\n self.assertTrue(home01 is not None)\n calendar01 = yield home01.childWithName(\"calendar\")\n yield calendar01.createCalendarObjectWithName(\"1.ics\", Component.fromString(self.caldata1))\n yield calendar01.createCalendarObjectWithName(\"2.ics\", Component.fromString(self.caldata2))\n yield self.commitTransaction(0)\n\n home = yield self._remoteHome(self.theTransactionUnderTest(1), \"user01\")\n self.assertTrue(home is not None)\n calendar = yield home.childWithName(\"calendar\")\n count = yield calendar.countObjectResources()\n self.assertEqual(count, 2)\n yield self.commitTransaction(1)", "def extract_resources(self, resources, collector, cwd=None):\n raise NotImplementedError(\"not implemented\")", "def build_resource(self, *args, **kwargs):\r\n r = {}\r\n for current_resource in self.resources:\r\n item = self._get_resource(\r\n repo=self.current_repo, owner=self.owner, \r\n resource=current_resource, **kwargs\r\n )\r\n if not item: continue\r\n r[current_resource] = item\r\n\r\n return r", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n kind: Optional[pulumi.Input[Union[str, 'Kind']]] = None,\n location: Optional[pulumi.Input[str]] = None,\n managed_network_group_name: Optional[pulumi.Input[str]] = None,\n managed_network_name: Optional[pulumi.Input[str]] = None,\n management_groups: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceIdArgs']]]]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n subnets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceIdArgs']]]]] = None,\n subscriptions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceIdArgs']]]]] = None,\n virtual_networks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceIdArgs']]]]] = None,\n __props__=None):\n 
...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n access_configuration_name: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n directory_id: Optional[pulumi.Input[str]] = None,\n force_remove_permission_policies: Optional[pulumi.Input[bool]] = None,\n permission_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AccessConfigurationPermissionPolicyArgs']]]]] = None,\n relay_state: Optional[pulumi.Input[str]] = None,\n session_duration: Optional[pulumi.Input[int]] = None,\n __props__=None):\n ...", "def CreateCollectionSample():\n client = CreateClient()\n col = gdata.docs.data.Resource(type='folder', title='My Sample Folder')\n col = client.CreateResource(col)\n print 'Created collection:', col.title.text, col.resource_id.text", "def resource_group(self) -> str:\n return pulumi.get(self, \"resource_group\")", "def __init__(self, owner, resourceFile):\n self.checksum = Path(resourceFile).md5 # Just use the path name as a unique ID\n _Resource.__init__(self, owner, resourceFile)\n if self._idevice:\n self._idevice.userResources.append(self)", "def test_objectresource_objectwith(self):\n\n home01 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=\"user01\", create=True)\n self.assertTrue(home01 is not None)\n calendar01 = yield home01.childWithName(\"calendar\")\n resource01 = yield calendar01.createCalendarObjectWithName(\"1.ics\", Component.fromString(self.caldata1))\n yield calendar01.createCalendarObjectWithName(\"2.ics\", Component.fromString(self.caldata2))\n yield self.commitTransaction(0)\n\n home = yield self._remoteHome(self.theTransactionUnderTest(1), \"user01\")\n self.assertTrue(home is not None)\n calendar = yield home.childWithName(\"calendar\")\n\n resource = yield calendar.objectResourceWithName(\"2.ics\")\n self.assertEqual(resource.name(), \"2.ics\")\n\n resource = yield calendar.objectResourceWithName(\"foo.ics\")\n self.assertEqual(resource, None)\n\n resource = yield calendar.objectResourceWithUID(\"uid1\")\n self.assertEqual(resource.name(), \"1.ics\")\n\n resource = yield calendar.objectResourceWithUID(\"foo\")\n self.assertEqual(resource, None)\n\n resource = yield calendar.objectResourceWithID(resource01.id())\n self.assertEqual(resource.name(), \"1.ics\")\n\n resource = yield calendar.objectResourceWithID(12345)\n self.assertEqual(resource, None)\n\n yield self.commitTransaction(1)", "def validate_resources(self, folder, resources):\r\n self.validate_files_exist(folder, resources)\r\n self.validate_no_duplicate_paths(resources)", "def create_resources(self, pool=True, job=True, storage=True):\n\n if pool:\n self.controller.create_pool(self.info)\n self.logger.info(\"Pool of the mission %s created.\", self.info.name)\n\n if job:\n self.controller.create_job(self.info)\n self.logger.info(\"Job of the mission %s created.\", self.info.name)\n\n if storage:\n self.controller.create_storage_container(self.info)\n self.controller.get_storage_container_access_tokens(self.info)\n self.logger.info(\"Storage of the mission %s created.\", self.info.name)\n\n self.logger.info(\"Resources of the mission %s created.\", self.info.name)", "def install_private_resources(context):\n pass", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n bucket: Optional[pulumi.Input[str]] = None,\n default_acl: Optional[pulumi.Input[str]] = None,\n predefined_acl: Optional[pulumi.Input[str]] = None,\n role_entities: 
Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n __props__=None):\n ...", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n auto_devops_enabled: Optional[pulumi.Input[bool]] = None,\n avatar: Optional[pulumi.Input[str]] = None,\n avatar_hash: Optional[pulumi.Input[str]] = None,\n avatar_url: Optional[pulumi.Input[str]] = None,\n default_branch_protection: Optional[pulumi.Input[int]] = None,\n description: Optional[pulumi.Input[str]] = None,\n emails_disabled: Optional[pulumi.Input[bool]] = None,\n extra_shared_runners_minutes_limit: Optional[pulumi.Input[int]] = None,\n full_name: Optional[pulumi.Input[str]] = None,\n full_path: Optional[pulumi.Input[str]] = None,\n ip_restriction_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n lfs_enabled: Optional[pulumi.Input[bool]] = None,\n membership_lock: Optional[pulumi.Input[bool]] = None,\n mentions_disabled: Optional[pulumi.Input[bool]] = None,\n name: Optional[pulumi.Input[str]] = None,\n parent_id: Optional[pulumi.Input[int]] = None,\n path: Optional[pulumi.Input[str]] = None,\n prevent_forking_outside_group: Optional[pulumi.Input[bool]] = None,\n project_creation_level: Optional[pulumi.Input[str]] = None,\n request_access_enabled: Optional[pulumi.Input[bool]] = None,\n require_two_factor_authentication: Optional[pulumi.Input[bool]] = None,\n runners_token: Optional[pulumi.Input[str]] = None,\n share_with_group_lock: Optional[pulumi.Input[bool]] = None,\n shared_runners_minutes_limit: Optional[pulumi.Input[int]] = None,\n subgroup_creation_level: Optional[pulumi.Input[str]] = None,\n two_factor_grace_period: Optional[pulumi.Input[int]] = None,\n visibility_level: Optional[pulumi.Input[str]] = None,\n web_url: Optional[pulumi.Input[str]] = None) -> 'Group':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _GroupState.__new__(_GroupState)\n\n __props__.__dict__[\"auto_devops_enabled\"] = auto_devops_enabled\n __props__.__dict__[\"avatar\"] = avatar\n __props__.__dict__[\"avatar_hash\"] = avatar_hash\n __props__.__dict__[\"avatar_url\"] = avatar_url\n __props__.__dict__[\"default_branch_protection\"] = default_branch_protection\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"emails_disabled\"] = emails_disabled\n __props__.__dict__[\"extra_shared_runners_minutes_limit\"] = extra_shared_runners_minutes_limit\n __props__.__dict__[\"full_name\"] = full_name\n __props__.__dict__[\"full_path\"] = full_path\n __props__.__dict__[\"ip_restriction_ranges\"] = ip_restriction_ranges\n __props__.__dict__[\"lfs_enabled\"] = lfs_enabled\n __props__.__dict__[\"membership_lock\"] = membership_lock\n __props__.__dict__[\"mentions_disabled\"] = mentions_disabled\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"parent_id\"] = parent_id\n __props__.__dict__[\"path\"] = path\n __props__.__dict__[\"prevent_forking_outside_group\"] = prevent_forking_outside_group\n __props__.__dict__[\"project_creation_level\"] = project_creation_level\n __props__.__dict__[\"request_access_enabled\"] = request_access_enabled\n __props__.__dict__[\"require_two_factor_authentication\"] = require_two_factor_authentication\n __props__.__dict__[\"runners_token\"] = runners_token\n __props__.__dict__[\"share_with_group_lock\"] = share_with_group_lock\n __props__.__dict__[\"shared_runners_minutes_limit\"] = shared_runners_minutes_limit\n __props__.__dict__[\"subgroup_creation_level\"] = subgroup_creation_level\n 
__props__.__dict__[\"two_factor_grace_period\"] = two_factor_grace_period\n __props__.__dict__[\"visibility_level\"] = visibility_level\n __props__.__dict__[\"web_url\"] = web_url\n return Group(resource_name, opts=opts, __props__=__props__)", "def test_check_resource(self):\n s1 = System()\n b1 = Books(\"1984\", \"George Orwell\", \"Harvill Secker\", \"1949\", \"0123456789123\")\n self.assertEqual(s1.check_resource(b1), False)\n s1.add_resource(b1)\n self.assertEqual(s1.check_resource(b1), True)", "def createResourceSims(self):\n if self.game.myEmpire['viewResources'] == 0:\n return\n import anwp.sims\n # remove old sims if any\n self.removeResourceSims()\n # create resource sims\n self.resourceSims = []\n for systemID, systemDict in self.game.allSystems.iteritems():\n if systemDict['myEmpireID'] == self.game.myEmpireID:\n # create resource sims representing resources on system\n i = 0\n for attr in ['AL', 'EC', 'IA']:\n if systemDict[attr] > 0:\n # system produces this resource create sim\n name = string.lower(attr[-2:])\n imageFileName = '%smini_%s.png' % (self.game.app.genImagePath, name)\n \n # create sim\n sim = ResourceEntity(self, anwp.sims.categories.StaticCategory(imageFileName, 'resource'))\n \n # add sim to world\n self.resourceSims.append(sim)\n x = systemDict['x'] - 15\n y = systemDict['y'] - 45 - 20*i\n facing = 0\n speed = 0\n force = 1\n self.world.addToWorld(sim, x, y, facing, speed, force)\n i += 1\n \n # create resource sims representing resources being generated\n i = 0\n for attr in ['prodAL', 'prodEC', 'prodIA', 'prodCR']:\n if systemDict[attr] > 0:\n # system produces this resource create sim\n name = string.lower(attr[-2:])\n imageFileName = '%smini_%s_gen.png' % (self.game.app.genImagePath, name)\n \n # create sim\n sim = ResourceEntity(self, anwp.sims.categories.StaticCategory(imageFileName, 'resource'))\n \n # add sim to world\n self.resourceSims.append(sim)\n x = systemDict['x'] + 15\n y = systemDict['y'] - 45 - 20*i\n facing = 0\n speed = 0\n force = 1\n self.world.addToWorld(sim, x, y, facing, speed, force)\n i += 1", "def test_resource_combinations_rpc(\n self, ns_resource_factory, bucket_factory, platform1, platform2\n ):\n # Create the namespace resources and verify health\n ns_resource_name1 = ns_resource_factory(platform=platform1)[1]\n ns_resource_name2 = ns_resource_factory(platform=platform2)[1]\n\n # Create the namespace bucket on top of the namespace resource\n bucket_factory(\n amount=1,\n interface=\"mcg-namespace\",\n write_ns_resource=ns_resource_name1,\n read_ns_resources=[ns_resource_name1, ns_resource_name2],\n )", "def __init__(__self__,\n resource_name: str,\n args: GroupArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def MakeResource(resource_list, output_list=None):\n content = {'resources': resource_list}\n if output_list:\n content['outputs'] = output_list\n return yaml.dump(content)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n app_id: Optional[pulumi.Input[str]] = None,\n index: Optional[pulumi.Input[str]] = None,\n master: Optional[pulumi.Input[str]] = None,\n pattern: Optional[pulumi.Input[str]] = None,\n permissions: Optional[pulumi.Input[str]] = None,\n required: Optional[pulumi.Input[bool]] = None,\n title: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n user_type: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def resourceid(self):", "def resources(ctx, job, gpu):\n\n def get_experiment_resources():\n try:\n 
message_handler = Printer.gpu_resources if gpu else Printer.resources\n PolyaxonClient().experiment.resources(\n user, project_name, _experiment, message_handler=message_handler)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not get resources for experiment `{}`.'.format(_experiment))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n def get_experiment_job_resources():\n try:\n message_handler = Printer.gpu_resources if gpu else Printer.resources\n PolyaxonClient().experiment_job.resources(user,\n project_name,\n _experiment,\n _job,\n message_handler=message_handler)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not get resources for job `{}`.'.format(_job))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),\n ctx.obj.get('experiment'))\n\n if job:\n _job = get_experiment_job_or_local(job)\n get_experiment_job_resources()\n else:\n get_experiment_resources()", "def test_resource_namespace(self, integrationtest, k8sconfig):\n # Fixtures.\n config = self.k8sconfig(integrationtest, k8sconfig)\n MM = MetaManifest\n\n for src in [\"\", \"v1\"]:\n # A particular Namespace.\n res, err = k8s.resource(config, MM(src, \"Namespace\", None, \"name\"))\n assert not err\n assert res == K8sResource(\n apiVersion=\"v1\",\n kind=\"Namespace\",\n name=\"namespaces\",\n namespaced=False,\n url=f\"{config.url}/api/v1/namespaces/name\",\n )\n\n # A particular Namespace in a particular namespace -> Invalid.\n assert k8s.resource(config, MM(src, \"Namespace\", \"ns\", \"name\")) == (res, err)\n\n # All Namespaces.\n res, err = k8s.resource(config, MM(src, \"Namespace\", None, None))\n assert not err\n assert res == K8sResource(\n apiVersion=\"v1\",\n kind=\"Namespace\",\n name=\"namespaces\",\n namespaced=False,\n url=f\"{config.url}/api/v1/namespaces\",\n )\n\n # Same as above because the \"namespace\" argument is ignored for Namespaces.\n assert k8s.resource(config, MM(src, \"Namespace\", \"name\", \"\")) == (res, err)" ]
[ "0.6505323", "0.57500225", "0.5746185", "0.57347137", "0.5730045", "0.5657918", "0.5604309", "0.5549817", "0.55444926", "0.54474324", "0.54407877", "0.54324114", "0.54243886", "0.5396971", "0.5391554", "0.5361762", "0.53541374", "0.5320343", "0.528993", "0.52822185", "0.52756464", "0.52688456", "0.52669996", "0.5243261", "0.5236253", "0.5233608", "0.52227277", "0.52138656", "0.5209661", "0.52072483", "0.52041715", "0.51961666", "0.5180387", "0.5173579", "0.5168987", "0.51608366", "0.51517946", "0.5145575", "0.512511", "0.51215297", "0.5116804", "0.511622", "0.5115523", "0.5102153", "0.50996006", "0.50905573", "0.50862205", "0.50686175", "0.5066495", "0.5064043", "0.5050659", "0.50478506", "0.50464505", "0.5042504", "0.5039299", "0.50351787", "0.50301725", "0.50154555", "0.50051713", "0.5002701", "0.50010765", "0.4994619", "0.499339", "0.49906406", "0.49571446", "0.49418736", "0.49270067", "0.49230695", "0.4921743", "0.4921361", "0.49186987", "0.49177045", "0.49177045", "0.49177045", "0.49177045", "0.4917115", "0.4901521", "0.48987564", "0.4898386", "0.48957208", "0.48899916", "0.48898923", "0.48890716", "0.4884598", "0.48842147", "0.48834944", "0.4880332", "0.48793304", "0.48772493", "0.48764163", "0.4871806", "0.48708284", "0.48679227", "0.4867406", "0.4863927", "0.48552412", "0.48522156", "0.4849889", "0.48485264", "0.48472816", "0.48432577" ]
0.0
-1
Resource Management allows you to build an organizational structure for resources based on your business requirements. You can use resource directories, folders, accounts, and resource groups to hierarchically organize and manage resources. For more information, see [What is Resource Management?](~~94475~~)
def modify_resource_group(
    self,
    request: dds_20151201_models.ModifyResourceGroupRequest,
) -> dds_20151201_models.ModifyResourceGroupResponse:
    runtime = util_models.RuntimeOptions()
    return self.modify_resource_group_with_options(request, runtime)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resources(self):", "def gen_resources(self):\n\n print \"\\t* Adding resources to compute template\"\n\n # add all the nets and subnets\n self.gen_net_resources()\n\n # add all routers\n self.gen_router_resources()\n\n # add all servers/intances\n self.gen_server_resources()", "def resources():\n check_resources()", "def assemble_resource_directories(project, base_dir):\n resource_path = os.path.join(base_dir, project.resources_path)\n os.makedirs(os.path.join(resource_path, 'images'))\n os.makedirs(os.path.join(resource_path, 'fonts'))\n os.makedirs(os.path.join(resource_path, 'data'))", "def resource_prefix(self):", "def resource_mapping():\n return {\n 'OS::Heat::ResourceChain': ResourceChain,\n }", "def resources(self):\n return self.__resources", "def resource_map(self):", "def test_create_namespaced_local_resource_access_review(self):\n pass", "def test_create_namespaced_resource_access_review(self):\n pass", "def ResourcePath(self, name):\n pass", "def _categorize_resource(self, resource: Resource, required_permissions: str) -> None:\n if resource.is_user_provided:\n self.resources_reused.append({\"arn\": resource.arn, \"required_permissions\": required_permissions})\n else:\n self.resources_created.append({\"arn\": resource.arn})", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n auto_devops_enabled: Optional[pulumi.Input[bool]] = None,\n avatar: Optional[pulumi.Input[str]] = None,\n avatar_hash: Optional[pulumi.Input[str]] = None,\n default_branch_protection: Optional[pulumi.Input[int]] = None,\n description: Optional[pulumi.Input[str]] = None,\n emails_disabled: Optional[pulumi.Input[bool]] = None,\n extra_shared_runners_minutes_limit: Optional[pulumi.Input[int]] = None,\n ip_restriction_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n lfs_enabled: Optional[pulumi.Input[bool]] = None,\n membership_lock: Optional[pulumi.Input[bool]] = None,\n mentions_disabled: Optional[pulumi.Input[bool]] = None,\n name: Optional[pulumi.Input[str]] = None,\n parent_id: Optional[pulumi.Input[int]] = None,\n path: Optional[pulumi.Input[str]] = None,\n prevent_forking_outside_group: Optional[pulumi.Input[bool]] = None,\n project_creation_level: Optional[pulumi.Input[str]] = None,\n request_access_enabled: Optional[pulumi.Input[bool]] = None,\n require_two_factor_authentication: Optional[pulumi.Input[bool]] = None,\n share_with_group_lock: Optional[pulumi.Input[bool]] = None,\n shared_runners_minutes_limit: Optional[pulumi.Input[int]] = None,\n subgroup_creation_level: Optional[pulumi.Input[str]] = None,\n two_factor_grace_period: Optional[pulumi.Input[int]] = None,\n visibility_level: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def resource_manager():\n return visa.ResourceManager()", "def testSubResources(self):\n\n def CountResourceTree(resource):\n ret = 0\n for r in resource._resources:\n ret += 1 + CountResourceTree(r)\n return ret\n\n api = self.ApiFromDiscoveryDoc('moderator.v1.json')\n top_level_resources = 0\n total_resources = 0\n non_method_resources = 0\n have_sub_resources = 0\n have_sub_resources_and_methods = 0\n for r in api._resources:\n top_level_resources += 1\n total_resources += 1 + CountResourceTree(r)\n if not r._methods:\n non_method_resources += 1\n if r._resources:\n have_sub_resources += 1\n if r._resources and r._methods:\n have_sub_resources_and_methods += 1\n # Hand counted 18 resources in the file.\n self.assertEquals(18, total_resources)\n self.assertEquals(11, 
top_level_resources)\n # 4 of them have no methods, only sub resources\n self.assertEquals(4, non_method_resources)\n # 6 of them have sub resources.\n self.assertEquals(6, have_sub_resources)\n # And, of course, 2 should have both sub resources and methods\n self.assertEquals(2, have_sub_resources_and_methods)", "def test_create_local_resource_access_review_for_all_namespaces(self):\n pass", "def GetResourceAclSample():\n client = CreateClient()\n for resource in client.GetResources(limit=5).entry:\n acl_feed = client.GetResourceAcl(resource)\n for acl in acl_feed.entry:\n print acl.role.value, acl.scope.type, acl.scope.value", "def test_objectresource_resourcenameforuid(self):\n\n home01 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=\"user01\", create=True)\n self.assertTrue(home01 is not None)\n calendar01 = yield home01.childWithName(\"calendar\")\n yield calendar01.createCalendarObjectWithName(\"1.ics\", Component.fromString(self.caldata1))\n yield calendar01.createCalendarObjectWithName(\"2.ics\", Component.fromString(self.caldata2))\n yield self.commitTransaction(0)\n\n home = yield self._remoteHome(self.theTransactionUnderTest(1), \"user01\")\n self.assertTrue(home is not None)\n calendar = yield home.childWithName(\"calendar\")\n\n name = yield calendar.resourceNameForUID(\"uid1\")\n self.assertEqual(name, \"1.ics\")\n\n name = yield calendar.resourceNameForUID(\"uid2\")\n self.assertEqual(name, \"2.ics\")\n\n name = yield calendar.resourceNameForUID(\"foo\")\n self.assertEqual(name, None)\n\n yield self.commitTransaction(1)", "def test_create_resource_group(self):\n pass", "def create_resource_object():\n\n # Create two objects of different users and same center code\n Resource.objects.create(status=0, title='Recurso de teste (BR1.1)', \n link='http://bvsalud.org', originator='BIREME',\n created_by_id=1, cooperative_center_code='BR1.1')\n \n Resource.objects.create(status=0, title='Recurso de teste (BR1.1)', \n link='http://bvsalud.org', originator='BIREME',\n created_by_id=2, cooperative_center_code='BR1.1')\n\n # Create one object of diffent center code\n Resource.objects.create(status=0, title='Recurso de teste (PY3.1)', \n link='http://bvsalud.org', originator='BIREME',\n created_by_id=3, cooperative_center_code='PY3.1')\n\n\n # add descriptor and thematic area for resource pk 1\n object_ct = ContentType.objects.get_for_model(Resource)\n descriptor = Descriptor.objects.create(object_id=1, content_type=object_ct, text='descritor 1')\n keyword = Keyword.objects.create(object_id=1, content_type=object_ct, text='keyword 1')\n thematic = ResourceThematic.objects.create(object_id=1, content_type=object_ct, thematic_area_id=1)", "def getResources(self, folder):\n\n #-------------------- \n # Get the resource JSON\n #-------------------- \n folder += \"/resources\"\n resources = self.__getJson(folder)\n #print(\"%s %s\"%(, folder))\n #print(\" Got resources: '%s'\"%(str(resources)))\n\n\n\n #-------------------- \n # Filter the JSONs\n #-------------------- \n resourceNames = []\n for r in resources:\n if 'label' in r:\n resourceNames.append(r['label'])\n #print(\"FOUND RESOURCE ('%s') : %s\"%(folder, r['label']))\n elif 'Name' in r:\n resourceNames.append(r['Name'])\n #print(\"FOUND RESOURCE ('%s') : %s\"%(folder, r['Name']))\n\n return resourceNames", "def resources(request):\n projects, secrets, pools, storageclasses, pvcs, pods = ([] for i in range(6))\n\n def finalizer():\n \"\"\"\n Delete the resources created during the test\n \"\"\"\n for resource_type 
in pods, pvcs, storageclasses, secrets:\n for resource in resource_type:\n resource.delete()\n resource.ocp.wait_for_delete(resource.name)\n if pools:\n # Delete only the RBD pool\n pools[0].delete()\n if projects:\n for project in projects:\n project.delete(resource_name=project.namespace)\n project.wait_for_delete(project.namespace)\n\n request.addfinalizer(finalizer)\n\n return projects, secrets, pools, storageclasses, pvcs, pods", "def init_physical_resources():\n test_physical_resources = []\n\n # add info to list in memory, one by one, following signature values\n phys_resrc_ID = 1\n phys_resrc_name = \"small-cavium-1\"\n phys_resrc_info = \"Jump server in Arm pod, 48 cores, 64G RAM, 447G SSD, aarch64 Cavium ThunderX, Ubuntu OS\"\n phys_resrc_IPAddress = \"10.10.50.12\"\n phys_resrc_MACAddress = \"00-14-22-01-23-45\"\n\n test_physical_resources.append(PhysicalResource(phys_resrc_ID, phys_resrc_name,\n phys_resrc_info,\n phys_resrc_IPAddress,\n phys_resrc_MACAddress))\n\n phys_resrc_ID = 2\n phys_resrc_name = \"medium-cavium-1\"\n phys_resrc_info = \"Jump server in New York pod, 96 cores, 64G RAM, 447G SSD, aarch64 Cavium ThunderX, Ubuntu OS\"\n phys_resrc_IPAddress = \"30.31.32.33\"\n phys_resrc_MACAddress = \"0xb3:22:05:c1:aa:82\"\n\n test_physical_resources.append(PhysicalResource(phys_resrc_ID, phys_resrc_name,\n phys_resrc_info,\n phys_resrc_IPAddress,\n phys_resrc_MACAddress))\n\n phys_resrc_ID = 3\n phys_resrc_name = \"mega-cavium-666\"\n phys_resrc_info = \"Jump server in Las Vegas, 1024 cores, 1024G RAM, 6666G SSD, aarch64 Cavium ThunderX, Ubuntu OS\"\n phys_resrc_IPAddress = \"54.53.52.51\"\n phys_resrc_MACAddress = \"01-23-45-67-89-ab\"\n\n test_physical_resources.append(PhysicalResource(phys_resrc_ID, phys_resrc_name,\n phys_resrc_info,\n phys_resrc_IPAddress,\n phys_resrc_MACAddress))\n\n\n # write list to binary file\n write_list_bin(test_physical_resources, FILE_PHYSICAL_RESOURCES)\n\n return test_physical_resources", "def CreateResourceInCollectionSample():\n client = CreateClient()\n col = gdata.docs.data.Resource(type='folder', title='My Sample Folder')\n col = client.CreateResource(col)\n print 'Created collection:', col.title.text, col.resource_id.text\n doc = gdata.docs.data.Resource(type='document', title='My Sample Doc')\n doc = client.CreateResource(doc, collection=col)\n print 'Created:', doc.title.text, doc.resource_id.text", "def test_getResourceRelations(self):\n pass", "def collect_resources(namespace, output_dir, api_resources, k8s_cli_input=\"\", selector=\"\"):\n set_file_logger(output_dir)\n k8s_cli = detect_k8s_cli(k8s_cli_input)\n ns_output_dir = os.path.join(output_dir, namespace)\n make_dir(ns_output_dir)\n collect_api_resources(namespace, ns_output_dir, k8s_cli, api_resources, selector)\n collect_api_resources_description(namespace, ns_output_dir, k8s_cli, api_resources, selector)\n collect_pods_logs(namespace, ns_output_dir, k8s_cli, logs_from_all_pods=True)", "def __init__(__self__, resource_name, opts=None, attributes=None, name=None, parent_id=None, realm_id=None, __props__=None, __name__=None, __opts__=None):\n if __name__ is not None:\n warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning)\n resource_name = __name__\n if __opts__ is not None:\n warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning)\n opts = __opts__\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a 
ResourceOptions instance')\n if opts.version is None:\n opts.version = utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = dict()\n\n __props__['attributes'] = attributes\n __props__['name'] = name\n __props__['parent_id'] = parent_id\n if realm_id is None:\n raise TypeError(\"Missing required property 'realm_id'\")\n __props__['realm_id'] = realm_id\n __props__['path'] = None\n super(Group, __self__).__init__(\n 'keycloak:index/group:Group',\n resource_name,\n __props__,\n opts)", "def create_resources(self) -> List[ResourceDescription]:\r\n return self.resources", "def test_create_resource_access_review_for_all_namespaces(self):\n pass", "def resource(self, *path):\n # TODO(vadimsh): Verify that file exists. Including a case like:\n # module.resource('dir').join('subdir', 'file.py')\n return self._module.RESOURCE_DIRECTORY.join(*path)", "def create_sagemaker_resource(\n resource_plural, resource_name, spec_file, replacements, namespace=\"default\"\n):\n\n reference, spec, resource = k8s.load_and_create_resource(\n resource_directory,\n CRD_GROUP,\n CRD_VERSION,\n resource_plural,\n resource_name,\n spec_file,\n replacements,\n namespace,\n )\n\n return reference, spec, resource", "def __init__(self, basedir=None):\n # ------------------------------------------------------------------------\n super(Resources, self).__init__()\n self.xInitialize(basedir or \"resources\")", "def generate_config(context):\n\n properties = context.properties\n\n base_resource = get_type(context)\n\n resources = []\n\n if 'dependsOn' in properties:\n dependson = {'metadata': {'dependsOn': properties['dependsOn']}}\n dependson_root = properties['dependsOn']\n else:\n dependson = {}\n dependson_root = []\n\n for role in properties['roles']:\n for member in role['members']:\n suffix = sha1(\n '{}-{}'.format(role['role'], member).encode('utf-8')).hexdigest()[:10]\n policy_get_name = '{}-{}'.format(context.env['name'], suffix)\n\n resource_name = '{}-{}'.format(policy_get_name,\n base_resource['postfix'])\n iam_resource = {\n 'name': resource_name,\n # TODO - Virtual type documentation needed\n 'type': base_resource['dm_type'],\n 'properties': {\n base_resource['dm_resource_property']: base_resource['id'],\n 'role': role['role'],\n 'member': member,\n }\n }\n iam_resource.update(dependson)\n resources.append(iam_resource)\n\n dependson = {'metadata': {'dependsOn': [\n resource_name] + dependson_root}}\n\n return {\"resources\": resources}", "def resources(stack, region, profile):\n logging.debug(f'finding resources - stack: {stack}')\n logging.debug(f'region: {region}')\n logging.debug(f'profile: {profile}')\n tool = ResourceTool(\n Stack=stack,\n Region=region,\n Profile=profile,\n Verbose=True\n )\n\n if tool.list_resources():\n sys.exit(0)\n else:\n sys.exit(1)", "def subdir(self):", "def get_recipe_resource():\n return os.getenv(\"DKU_CUSTOM_RESOURCE_FOLDER\")", "def get_resource_path():\n return os.path.join(os.path.dirname(__file__), \"resources\") + os.path.sep", "def resource_path(name):\n return os.path.join(\n os.path.dirname(__file__), 'images', 'resource', name)", "def __init__(__self__,\n resource_name: str,\n args: Optional[AclArgs] = None,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def link_resources(ctx):\n\n for resource in RESOURCES:\n\n command = \"ln -s -r -f -T {res}/{resource} {proj}/{resource}\".format(\n 
res=RESOURCE_DIR,\n proj=PROJECT_DIR,\n resource=resource)\n\n print(\"Running\")\n print(command)\n print(\"-----------------------------\")\n ctx.run(command)", "def format_resource_tree(\n nested_resources, # type: NestedResourceNodes\n db_session, # type: Session\n resources_perms_dict=None, # type: Optional[ResourcePermissionMap]\n permission_type=None, # type: Optional[PermissionType]\n nesting_key=\"children\", # type: NestingKeyType\n): # type: (...) -> JSON\n # optimization to avoid re-lookup of 'allowed permissions' when already fetched\n # unused when parsing 'applied permissions'\n __internal_svc_res_perm_dict = {}\n\n def recursive_fmt_res_tree(nested_dict): # type: (NestedResourceNodes) -> JSON\n fmt_res_tree = {}\n for child_id, child_dict in nested_dict.items():\n resource = child_dict[\"node\"]\n # nested nodes always use 'children' regardless of nested-key\n # nested-key employed in the generated format will indicate the real resource parents/children relationship\n new_nested = child_dict[\"children\"]\n perms = []\n\n # case of pre-specified user/group-specific permissions\n if resources_perms_dict is not None:\n if resource.resource_id in resources_perms_dict.keys():\n perms = resources_perms_dict[resource.resource_id]\n\n # case of full fetch (allowed resource permissions)\n else:\n # directly access the resource if it is a service\n service = None # type: Optional[Service]\n if resource.root_service_id is None:\n service = resource\n service_id = resource.resource_id\n # obtain corresponding top-level service resource if not already available,\n # get resource permissions allowed under the top service's scope\n else:\n service_id = resource.root_service_id\n if service_id not in __internal_svc_res_perm_dict:\n service = ResourceService.by_resource_id(service_id, db_session=db_session)\n # add to dict only if not already added\n if service is not None and service_id not in __internal_svc_res_perm_dict:\n __internal_svc_res_perm_dict[service_id] = {\n res_type.resource_type_name: res_perms # use str key to match below 'resource_type' field\n for res_type, res_perms in SERVICE_TYPE_DICT[service.type].resource_types_permissions.items()\n }\n # in case of inverse nesting, service could be at \"bottom\"\n # retrieve its permissions directly since its type is never expected nested under itself\n res_type_name = resource.resource_type # type: Str\n if res_type_name == \"service\":\n perms = SERVICE_TYPE_DICT[service.type].permissions\n else:\n perms = __internal_svc_res_perm_dict[service_id][resource.resource_type]\n\n fmt_res_tree[child_id] = format_resource(resource, perms, permission_type)\n fmt_res_tree[child_id][nesting_key] = recursive_fmt_res_tree(new_nested)\n return fmt_res_tree\n\n return recursive_fmt_res_tree(nested_resources)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n account_id: Optional[pulumi.Input[str]] = None,\n group: Optional[pulumi.Input[str]] = None,\n image_id: Optional[pulumi.Input[str]] = None,\n organization_arn: Optional[pulumi.Input[str]] = None,\n organizational_unit_arn: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def assemble_resources(base_dir, resource_path, resources, type_restrictions=None):\n for f in resources:\n if type_restrictions and f.kind not in type_restrictions:\n continue\n target_dir = os.path.abspath(os.path.join(base_dir, resource_path, ResourceFile.DIR_MAP[f.kind]))\n f.copy_all_variants_to_dir(target_dir)", "def 
create_resource_config_files(host_config, resource_config, type_map, bus_map, trecs_root_dir, output_dir, resource_config_dir, model_listen_port, agent_listen_port):\n for host in host_config:\n if host['host_type'] != 'RA':\n continue\n\n resource_name = host['attached_resource_name']\n\n init_data = {\n 'RA': {\n 'ip': '127.0.0.1',\n 'listen_port': agent_listen_port\n },\n 'bus_index': bus_map[resource_name],\n 'listen_port': model_listen_port,\n 'log_path': path.join(output_dir, 'csv', '{}.csv'.format(resource_name))\n }\n\n resource = next(resource for resource in resource_config['resources'] if resource['resource_name'] == resource_name)\n for key in resource.keys():\n if key.endswith('_path'):\n cwd = getcwd()\n chdir(resource_config_dir)\n resource[key] = path.abspath(resource[key])\n chdir(cwd)\n\n final_config = init_data.copy()\n final_config.update(resource)\n\n config_file_name = '{}_config.json'.format(resource_name)\n with open(\n path.join(trecs_root_dir, 'run', config_file_name), 'w'\n ) as init_file:\n dump(final_config, init_file)", "def test_resource_path(self):\n\n # Without arguments\n resources_root_path = os.path.abspath(os.path.join(\n MY_DIRECTORY, '..', '..', 'resources'\n ))\n self.assertEqual(resources_root_path, paths.resource())", "def resources(self, resources):\n self._resources = resources", "def load(self):\n self.suite.load()\n self.resource_map = {}\n dirlist = os.listdir(self.resources)\n for resource_name in (name for name in dirlist\n if os.path.isfile(os.path.join(self.resources,name)) and\n os.path.splitext(name)[1].lower() == '.fbr'):\n try:\n f = open(os.path.join(self.resources,resource_name),'rU')\n expr = f.read()\n d = eval(expr)\n resource_id = os.path.splitext(resource_name)[0].lower()\n d['id'] = resource_id\n kind = d['kind']\n del d['kind']\n self.resource_map[resource_id] = Resource.create(kind,**d)\n finally:\n f.close()", "def get_resource_dir(cls) -> str:\n return os.path.join(\n os.path.realpath(os.path.dirname(__file__)),\n os.pardir,\n os.pardir,\n os.pardir,\n \"gem5\",\n \"resources\",\n )", "def test_get_deployment_resources(self):\n pass", "def is_reserved_resource(self, work_dir: str, resource: str) -> bool:\n resource_dir = resource.split(\"/\")[0] if \"/\" in resource else resource\n if resource.startswith(\".resumables-\") and resource.endswith(\".db\"):\n logging.error(f\"resumable dbs not accessible {resource}\")\n return True\n elif re.match(r\"(.+)\\.([a-f\\d0-9-]{32,36})$\", resource):\n logging.error(\"merged resumable files not accessible\")\n return True\n elif re.match(r\"(.+).([a-f\\d0-9-]{32,36}).part$\", resource):\n logging.error(\"partial upload files not accessible\")\n return True\n elif VALID_UUID.match(resource_dir):\n potential_target = os.path.normpath(f\"{work_dir}/{resource_dir}\")\n if os.path.lexists(potential_target) and os.path.isdir(potential_target):\n content = os.listdir(potential_target)\n for entry in content:\n if re.match(r\"(.+).chunk.[0-9]+$\", entry):\n logging.error(f\"resumable directories not accessible {entry}\")\n return True\n return False", "def resource(self, n):\n\n cfg = self.read()\n\n for res in cfg.get('Resources', []):\n res_name = res.get('Resource')\n\n if res_name == n:\n return ConfigResource(res)", "def resources(self):\n return [self]", "def test_access_resource(self):\n test_resource = ResourceTypeName.get()\n role_name = 'test_role'\n resp = self.app.post(f'/v1/resource/{test_resource}', data=json.dumps({'actions': ['tr:action1']}),\n headers=admin_headers)\n 
self.assertEqual(resp.status_code, 201)\n with self.subTest(\"Permission is denied\"):\n resp = self.app.get(f'/v1/resource/{test_resource}', headers=user_header)\n self.assertEqual(resp.status_code, 403)\n\n role_request_body = {\n \"role_id\": role_name,\n \"policy\": {\n 'Statement': [{\n 'Sid': role_name,\n 'Action': [\n \"fus:DeleteResources\",\n \"fus:GetResources\"],\n 'Effect': 'Allow',\n 'Resource': [f\"arn:hca:fus:*:*:resource/{test_resource}\"]\n }]\n }\n }\n resp = self.app.post(f'/v1/role', data=json.dumps(role_request_body), headers=admin_headers)\n self.assertEqual(resp.status_code, 201)\n resp = self.app.put(f\"/v1/user/{service_accounts['user']['client_email']}/roles?action=add\",\n data=json.dumps({'roles': [role_name]}),\n headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n\n with self.subTest(\"Permission is granted\"):\n resp = self.app.get(f'/v1/resource/{test_resource}', headers=user_header)\n self.assertEqual(resp.status_code, 200)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n label: Optional[pulumi.Input[str]] = None,\n permissions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n __props__=None):\n ...", "def register_resources(self):\n raise NotImplementedError", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n domain_id: Optional[pulumi.Input[str]] = None,\n group_id: Optional[pulumi.Input[str]] = None,\n project_id: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n role_id: Optional[pulumi.Input[str]] = None,\n user_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def register_dcc_resource_path(resources_path):\n\n pass", "def set_resources():\n global available_resources\n global EdgenodeResources\n recv_json = request.get_json()\n for resourcename, value in recv_json.items():\n available_resources[resourcename] = value\n # TODO make this better\n EdgenodeResources = [TaskResources(ram=int(available_resources['RAM']), cpu=int(\n available_resources['CPU']), hdd=int(available_resources['HDD'])), available_resources['DATA']]\n\n print 'Available resources set to', EdgenodeResources\n return 'Available resources set to ' + str(available_resources)", "def resourceManager(*args, nameFilter: AnyStr=\"\", saveAs: List[AnyStr, AnyStr]=None,\n **kwargs)->None:\n pass", "def resources(filename):\n return send_from_directory(\"resources\", filename)", "def GetAllResourcesSample():\n client = CreateClient()\n # Unlike client.GetResources, this returns a list of resources\n for resource in client.GetAllResources():\n PrintResource(resource)", "def get_recipe_resource():\n return os.getenv(\"SKU_CUSTOM_RECIPE_RESOURCE_FOLDER\")", "def add_resources(event):\n anuket_resources.need()", "def resources(self) -> HTMLBody:\n\t\treturn render_template(\"resources.jinja2\")", "def get_resources(self, resource_data=None):\n if not resource_data and self.component:\n resource_data = self.component.get_resource_data()\n\n resources = []\n for resource in self.files:\n resource.update(resource_data)\n\n resource['storage_path'] = self.prefix + '/' + resource['name']\n relative_path = self.relative_path(data=resource)\n resource['relative_path'] = relative_path\n resource['url'] = resource['url'] + '/' + relative_path\n resources.append(resource)\n return resources", "def test_create_cluster_resource_quota(self):\n pass", "def __init__(__self__,\n 
resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n etag: Optional[pulumi.Input[str]] = None,\n file_shares: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FileShareConfigArgs']]]]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n kms_key_name: Optional[pulumi.Input[str]] = None,\n labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n location: Optional[pulumi.Input[str]] = None,\n networks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NetworkConfigArgs']]]]] = None,\n project: Optional[pulumi.Input[str]] = None,\n tier: Optional[pulumi.Input['InstanceTier']] = None,\n __props__=None):\n ...", "def test_sm_resource_object(self):\n\n # Add a faked storage_group\n faked_storage_group = self.add_storage_group1()\n storage_group_oid = faked_storage_group.oid\n\n storage_group_mgr = self.console.storage_groups\n\n # Execute the code to be tested\n storage_group = storage_group_mgr.resource_object(storage_group_oid)\n\n storage_group_uri = \"/api/storage-groups/\" + storage_group_oid\n\n sv_mgr = storage_group.storage_volumes\n vsr_mgr = storage_group.virtual_storage_resources\n\n assert isinstance(storage_group, StorageGroup)\n assert isinstance(sv_mgr, StorageVolumeManager)\n assert isinstance(vsr_mgr, VirtualStorageResourceManager)\n\n sg_cpc = storage_group.cpc\n assert isinstance(sg_cpc, Cpc)\n assert sg_cpc.uri == storage_group.properties['cpc-uri']\n\n # Note: Properties inherited from BaseResource are tested there,\n # but we test them again:\n assert storage_group.properties['object-uri'] == storage_group_uri\n assert storage_group.properties['object-id'] == storage_group_oid\n assert storage_group.properties['class'] == 'storage-group'\n assert storage_group.properties['parent'] == self.console.uri", "def gen_router_resources(self):\n\n print \"\\t* Adding router resources to compute template\"\n\n from nova import version\n year = version.version_string()\n\n for idx, router in enumerate(self.tenant_routers):\n router_ports = []\n for port in self.all_ports:\n if router[\"id\"] == port[\"device_id\"]:\n router_ports.append(port)\n\n # add the router definition\n if \"2013\" in year:\n # Havana Format\n data = {\"type\": \"OS::Neutron::Router\"}\n self.compute_data[\"resources\"][\"router%s\" % str(idx)] = data\n\n # routers without external gateway\n if router[\"external_gateway_info\"] is not None:\n\n name = {\"get_resource\": \"router%s\" % str(idx)}\n netid = {\"get_param\": \"public_net_%s\" % str(idx)}\n\n # add the router gateway\n data = {\"type\": \"OS::Neutron::RouterGateway\",\n \"properties\": {\n \"router_id\": name,\n \"network_id\": netid\n }}\n\n self.compute_data[\"resources\"][\"router_gateway%s\" % str(idx)] = data\n\n else:\n # Icehouse Format\n rtrName = router[\"name\"]\n # routers without external gateway\n if router[\"external_gateway_info\"] is not None:\n data = {\"type\": \"OS::Neutron::Router\",\n \"properties\": {\n \"name\": rtrName,\n \"external_gateway_info\": {\n \"network\": {\n \"get_param\": \"public_net_%s\" % str(idx)\n }\n }\n }}\n else:\n data = {\"type\": \"OS::Neutron::Router\",\n \"properties\": {\n \"name\": rtrName\n }\n }\n self.compute_data[\"resources\"][\"router%s\" % str(idx)] = data\n\n # internal port information needed\n internal_interfaces = filter(lambda port: port[\"device_owner\"] == \"network:router_interface\", router_ports)\n\n for idxs, interface in enumerate(internal_interfaces):\n # add the 
router interface\n\n for fixedip in interface[\"fixed_ips\"]:\n\n # create router interface\n data = {\"type\": \"OS::Neutron::RouterInterface\",\n \"properties\": {\n \"router_id\": {\"get_resource\": \"router%s\" % str(idx)},\n \"port_id\": {\"get_resource\": \"port_%s_%s\" % (str(idx), str(idxs))}\n }}\n self.compute_data[\"resources\"][\"router_interface%s_%s\" % (str(idx), str(idxs))] = data\n\n # create router port\n network = self.neutronclient.show_subnet(fixedip[\"subnet_id\"])[\"subnet\"][\"network_id\"]\n net_name = \"%s\" % str(self.neutronclient.show_network(network)[\"network\"][\"name\"])\n net_id = self.neutronclient.show_network(network)[\"network\"][\"id\"]\n\n fixed_ips = [{\"ip_address\": fixedip[\"ip_address\"]}]\n net = self.neutronclient.show_network(network)[\"network\"]\n if net[\"shared\"] is True:\n data = {\"type\": \"OS::Neutron::Port\",\n \"properties\": {\n \"fixed_ips\": fixed_ips,\n \"network_id\": net_id\n }}\n else:\n data = {\"type\": \"OS::Neutron::Port\",\n \"properties\": {\n \"fixed_ips\": fixed_ips,\n \"network_id\": {\"get_resource\": net_name}\n }}\n self.compute_data[\"resources\"][\"port_%s_%s\" % (str(idx), str(idxs))] = data", "def set_paths(self, specs, resources):\n self.install = 'install.xml'\n self.specs_path = path_format(specs)\n self.root = path_format(dirname(dirname(self.specs_path)) + '/')\n self.res_path = path_format(resources)\n self.resources['BASE'] = self.res_path\n self.specs['BASE'] = self.specs_path", "def resources(self, resources):\n\n self._resources = resources", "def resources(self, resources):\n\n self._resources = resources", "def resources(self, resources):\n\n self._resources = resources", "def resources(self, resources):\n\n self._resources = resources", "def write_resources(self, resources):\n for filename, data in list(resources.get('outputs', {}).items()):\n # Determine where to write the file to\n dest = os.path.join(self.output_dir, filename)\n path = os.path.dirname(dest)\n if path and not os.path.isdir(path):\n os.makedirs(path)\n\n # Write file\n with open(dest, 'wb') as f:\n f.write(data)", "def CreateResources(self, manifests, region):\n resource_dict = manifest_util.ParseDeployConfig(self.messages, manifests,\n region)\n msg_template = 'Created Cloud Deploy resource: {}.'\n # Create delivery pipeline first.\n # In case user has both types of pipeline definition in the same\n # config file.\n pipelines = resource_dict[manifest_util.DELIVERY_PIPELINE_KIND_V1BETA1]\n if pipelines:\n operation_dict = {}\n for resource in pipelines:\n operation_dict[resource.name] = self.CreateDeliveryPipeline(resource)\n self.operation_client.CheckOperationStatus(operation_dict, msg_template)\n # In case user has both types of target definition in the same\n # config file.\n targets = resource_dict[manifest_util.TARGET_KIND_V1BETA1]\n if targets:\n operation_dict = {}\n for resource in targets:\n operation_dict[resource.name] = target_util.PatchTarget(resource)\n self.operation_client.CheckOperationStatus(operation_dict, msg_template)\n # Create automation resource.\n automations = resource_dict[manifest_util.AUTOMATION_KIND]\n operation_dict = {}\n for resource in automations:\n operation_dict[resource.name] = automation_util.PatchAutomation(resource)\n self.operation_client.CheckOperationStatus(operation_dict, msg_template)\n # Create custom target type resource.\n custom_target_types = resource_dict[manifest_util.CUSTOM_TARGET_TYPE_KIND]\n operation_dict = {}\n for resource in custom_target_types:\n 
operation_dict[resource.name] = (\n custom_target_type_util.PatchCustomTargetType(resource)\n )\n self.operation_client.CheckOperationStatus(operation_dict, msg_template)", "def getResource(self):\n pass;", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n application_insights_id: Optional[pulumi.Input[str]] = None,\n container_registry_id: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n encryption: Optional[pulumi.Input[pulumi.InputType['WorkspaceEncryptionArgs']]] = None,\n friendly_name: Optional[pulumi.Input[str]] = None,\n high_business_impact: Optional[pulumi.Input[bool]] = None,\n identity: Optional[pulumi.Input[pulumi.InputType['WorkspaceIdentityArgs']]] = None,\n image_build_compute_name: Optional[pulumi.Input[str]] = None,\n key_vault_id: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n primary_user_assigned_identity: Optional[pulumi.Input[str]] = None,\n public_access_behind_virtual_network_enabled: Optional[pulumi.Input[bool]] = None,\n public_network_access_enabled: Optional[pulumi.Input[bool]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n sku_name: Optional[pulumi.Input[str]] = None,\n storage_account_id: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n v1_legacy_mode_enabled: Optional[pulumi.Input[bool]] = None,\n __props__=None):\n ...", "def test_objectresource_countobjects(self):\n\n home01 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=\"user01\", create=True)\n self.assertTrue(home01 is not None)\n calendar01 = yield home01.childWithName(\"calendar\")\n yield calendar01.createCalendarObjectWithName(\"1.ics\", Component.fromString(self.caldata1))\n yield calendar01.createCalendarObjectWithName(\"2.ics\", Component.fromString(self.caldata2))\n yield self.commitTransaction(0)\n\n home = yield self._remoteHome(self.theTransactionUnderTest(1), \"user01\")\n self.assertTrue(home is not None)\n calendar = yield home.childWithName(\"calendar\")\n count = yield calendar.countObjectResources()\n self.assertEqual(count, 2)\n yield self.commitTransaction(1)", "def extract_resources(self, resources, collector, cwd=None):\n raise NotImplementedError(\"not implemented\")", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n kind: Optional[pulumi.Input[Union[str, 'Kind']]] = None,\n location: Optional[pulumi.Input[str]] = None,\n managed_network_group_name: Optional[pulumi.Input[str]] = None,\n managed_network_name: Optional[pulumi.Input[str]] = None,\n management_groups: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceIdArgs']]]]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n subnets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceIdArgs']]]]] = None,\n subscriptions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceIdArgs']]]]] = None,\n virtual_networks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceIdArgs']]]]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n access_configuration_name: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n directory_id: Optional[pulumi.Input[str]] = None,\n force_remove_permission_policies: 
Optional[pulumi.Input[bool]] = None,\n permission_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AccessConfigurationPermissionPolicyArgs']]]]] = None,\n relay_state: Optional[pulumi.Input[str]] = None,\n session_duration: Optional[pulumi.Input[int]] = None,\n __props__=None):\n ...", "def build_resource(self, *args, **kwargs):\r\n r = {}\r\n for current_resource in self.resources:\r\n item = self._get_resource(\r\n repo=self.current_repo, owner=self.owner, \r\n resource=current_resource, **kwargs\r\n )\r\n if not item: continue\r\n r[current_resource] = item\r\n\r\n return r", "def CreateCollectionSample():\n client = CreateClient()\n col = gdata.docs.data.Resource(type='folder', title='My Sample Folder')\n col = client.CreateResource(col)\n print 'Created collection:', col.title.text, col.resource_id.text", "def resource_group(self) -> str:\n return pulumi.get(self, \"resource_group\")", "def __init__(self, owner, resourceFile):\n self.checksum = Path(resourceFile).md5 # Just use the path name as a unique ID\n _Resource.__init__(self, owner, resourceFile)\n if self._idevice:\n self._idevice.userResources.append(self)", "def test_objectresource_objectwith(self):\n\n home01 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=\"user01\", create=True)\n self.assertTrue(home01 is not None)\n calendar01 = yield home01.childWithName(\"calendar\")\n resource01 = yield calendar01.createCalendarObjectWithName(\"1.ics\", Component.fromString(self.caldata1))\n yield calendar01.createCalendarObjectWithName(\"2.ics\", Component.fromString(self.caldata2))\n yield self.commitTransaction(0)\n\n home = yield self._remoteHome(self.theTransactionUnderTest(1), \"user01\")\n self.assertTrue(home is not None)\n calendar = yield home.childWithName(\"calendar\")\n\n resource = yield calendar.objectResourceWithName(\"2.ics\")\n self.assertEqual(resource.name(), \"2.ics\")\n\n resource = yield calendar.objectResourceWithName(\"foo.ics\")\n self.assertEqual(resource, None)\n\n resource = yield calendar.objectResourceWithUID(\"uid1\")\n self.assertEqual(resource.name(), \"1.ics\")\n\n resource = yield calendar.objectResourceWithUID(\"foo\")\n self.assertEqual(resource, None)\n\n resource = yield calendar.objectResourceWithID(resource01.id())\n self.assertEqual(resource.name(), \"1.ics\")\n\n resource = yield calendar.objectResourceWithID(12345)\n self.assertEqual(resource, None)\n\n yield self.commitTransaction(1)", "def validate_resources(self, folder, resources):\r\n self.validate_files_exist(folder, resources)\r\n self.validate_no_duplicate_paths(resources)", "def create_resources(self, pool=True, job=True, storage=True):\n\n if pool:\n self.controller.create_pool(self.info)\n self.logger.info(\"Pool of the mission %s created.\", self.info.name)\n\n if job:\n self.controller.create_job(self.info)\n self.logger.info(\"Job of the mission %s created.\", self.info.name)\n\n if storage:\n self.controller.create_storage_container(self.info)\n self.controller.get_storage_container_access_tokens(self.info)\n self.logger.info(\"Storage of the mission %s created.\", self.info.name)\n\n self.logger.info(\"Resources of the mission %s created.\", self.info.name)", "def install_private_resources(context):\n pass", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n bucket: Optional[pulumi.Input[str]] = None,\n default_acl: Optional[pulumi.Input[str]] = None,\n predefined_acl: Optional[pulumi.Input[str]] = None,\n role_entities: 
Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n __props__=None):\n ...", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n auto_devops_enabled: Optional[pulumi.Input[bool]] = None,\n avatar: Optional[pulumi.Input[str]] = None,\n avatar_hash: Optional[pulumi.Input[str]] = None,\n avatar_url: Optional[pulumi.Input[str]] = None,\n default_branch_protection: Optional[pulumi.Input[int]] = None,\n description: Optional[pulumi.Input[str]] = None,\n emails_disabled: Optional[pulumi.Input[bool]] = None,\n extra_shared_runners_minutes_limit: Optional[pulumi.Input[int]] = None,\n full_name: Optional[pulumi.Input[str]] = None,\n full_path: Optional[pulumi.Input[str]] = None,\n ip_restriction_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n lfs_enabled: Optional[pulumi.Input[bool]] = None,\n membership_lock: Optional[pulumi.Input[bool]] = None,\n mentions_disabled: Optional[pulumi.Input[bool]] = None,\n name: Optional[pulumi.Input[str]] = None,\n parent_id: Optional[pulumi.Input[int]] = None,\n path: Optional[pulumi.Input[str]] = None,\n prevent_forking_outside_group: Optional[pulumi.Input[bool]] = None,\n project_creation_level: Optional[pulumi.Input[str]] = None,\n request_access_enabled: Optional[pulumi.Input[bool]] = None,\n require_two_factor_authentication: Optional[pulumi.Input[bool]] = None,\n runners_token: Optional[pulumi.Input[str]] = None,\n share_with_group_lock: Optional[pulumi.Input[bool]] = None,\n shared_runners_minutes_limit: Optional[pulumi.Input[int]] = None,\n subgroup_creation_level: Optional[pulumi.Input[str]] = None,\n two_factor_grace_period: Optional[pulumi.Input[int]] = None,\n visibility_level: Optional[pulumi.Input[str]] = None,\n web_url: Optional[pulumi.Input[str]] = None) -> 'Group':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _GroupState.__new__(_GroupState)\n\n __props__.__dict__[\"auto_devops_enabled\"] = auto_devops_enabled\n __props__.__dict__[\"avatar\"] = avatar\n __props__.__dict__[\"avatar_hash\"] = avatar_hash\n __props__.__dict__[\"avatar_url\"] = avatar_url\n __props__.__dict__[\"default_branch_protection\"] = default_branch_protection\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"emails_disabled\"] = emails_disabled\n __props__.__dict__[\"extra_shared_runners_minutes_limit\"] = extra_shared_runners_minutes_limit\n __props__.__dict__[\"full_name\"] = full_name\n __props__.__dict__[\"full_path\"] = full_path\n __props__.__dict__[\"ip_restriction_ranges\"] = ip_restriction_ranges\n __props__.__dict__[\"lfs_enabled\"] = lfs_enabled\n __props__.__dict__[\"membership_lock\"] = membership_lock\n __props__.__dict__[\"mentions_disabled\"] = mentions_disabled\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"parent_id\"] = parent_id\n __props__.__dict__[\"path\"] = path\n __props__.__dict__[\"prevent_forking_outside_group\"] = prevent_forking_outside_group\n __props__.__dict__[\"project_creation_level\"] = project_creation_level\n __props__.__dict__[\"request_access_enabled\"] = request_access_enabled\n __props__.__dict__[\"require_two_factor_authentication\"] = require_two_factor_authentication\n __props__.__dict__[\"runners_token\"] = runners_token\n __props__.__dict__[\"share_with_group_lock\"] = share_with_group_lock\n __props__.__dict__[\"shared_runners_minutes_limit\"] = shared_runners_minutes_limit\n __props__.__dict__[\"subgroup_creation_level\"] = subgroup_creation_level\n 
__props__.__dict__[\"two_factor_grace_period\"] = two_factor_grace_period\n __props__.__dict__[\"visibility_level\"] = visibility_level\n __props__.__dict__[\"web_url\"] = web_url\n return Group(resource_name, opts=opts, __props__=__props__)", "def createResourceSims(self):\n if self.game.myEmpire['viewResources'] == 0:\n return\n import anwp.sims\n # remove old sims if any\n self.removeResourceSims()\n # create resource sims\n self.resourceSims = []\n for systemID, systemDict in self.game.allSystems.iteritems():\n if systemDict['myEmpireID'] == self.game.myEmpireID:\n # create resource sims representing resources on system\n i = 0\n for attr in ['AL', 'EC', 'IA']:\n if systemDict[attr] > 0:\n # system produces this resource create sim\n name = string.lower(attr[-2:])\n imageFileName = '%smini_%s.png' % (self.game.app.genImagePath, name)\n \n # create sim\n sim = ResourceEntity(self, anwp.sims.categories.StaticCategory(imageFileName, 'resource'))\n \n # add sim to world\n self.resourceSims.append(sim)\n x = systemDict['x'] - 15\n y = systemDict['y'] - 45 - 20*i\n facing = 0\n speed = 0\n force = 1\n self.world.addToWorld(sim, x, y, facing, speed, force)\n i += 1\n \n # create resource sims representing resources being generated\n i = 0\n for attr in ['prodAL', 'prodEC', 'prodIA', 'prodCR']:\n if systemDict[attr] > 0:\n # system produces this resource create sim\n name = string.lower(attr[-2:])\n imageFileName = '%smini_%s_gen.png' % (self.game.app.genImagePath, name)\n \n # create sim\n sim = ResourceEntity(self, anwp.sims.categories.StaticCategory(imageFileName, 'resource'))\n \n # add sim to world\n self.resourceSims.append(sim)\n x = systemDict['x'] + 15\n y = systemDict['y'] - 45 - 20*i\n facing = 0\n speed = 0\n force = 1\n self.world.addToWorld(sim, x, y, facing, speed, force)\n i += 1", "def test_check_resource(self):\n s1 = System()\n b1 = Books(\"1984\", \"George Orwell\", \"Harvill Secker\", \"1949\", \"0123456789123\")\n self.assertEqual(s1.check_resource(b1), False)\n s1.add_resource(b1)\n self.assertEqual(s1.check_resource(b1), True)", "def test_resource_combinations_rpc(\n self, ns_resource_factory, bucket_factory, platform1, platform2\n ):\n # Create the namespace resources and verify health\n ns_resource_name1 = ns_resource_factory(platform=platform1)[1]\n ns_resource_name2 = ns_resource_factory(platform=platform2)[1]\n\n # Create the namespace bucket on top of the namespace resource\n bucket_factory(\n amount=1,\n interface=\"mcg-namespace\",\n write_ns_resource=ns_resource_name1,\n read_ns_resources=[ns_resource_name1, ns_resource_name2],\n )", "def __init__(__self__,\n resource_name: str,\n args: GroupArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def MakeResource(resource_list, output_list=None):\n content = {'resources': resource_list}\n if output_list:\n content['outputs'] = output_list\n return yaml.dump(content)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n app_id: Optional[pulumi.Input[str]] = None,\n index: Optional[pulumi.Input[str]] = None,\n master: Optional[pulumi.Input[str]] = None,\n pattern: Optional[pulumi.Input[str]] = None,\n permissions: Optional[pulumi.Input[str]] = None,\n required: Optional[pulumi.Input[bool]] = None,\n title: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n user_type: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def resourceid(self):", "def resources(ctx, job, gpu):\n\n def get_experiment_resources():\n try:\n 
message_handler = Printer.gpu_resources if gpu else Printer.resources\n PolyaxonClient().experiment.resources(\n user, project_name, _experiment, message_handler=message_handler)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not get resources for experiment `{}`.'.format(_experiment))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n def get_experiment_job_resources():\n try:\n message_handler = Printer.gpu_resources if gpu else Printer.resources\n PolyaxonClient().experiment_job.resources(user,\n project_name,\n _experiment,\n _job,\n message_handler=message_handler)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not get resources for job `{}`.'.format(_job))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),\n ctx.obj.get('experiment'))\n\n if job:\n _job = get_experiment_job_or_local(job)\n get_experiment_job_resources()\n else:\n get_experiment_resources()", "def test_resource_namespace(self, integrationtest, k8sconfig):\n # Fixtures.\n config = self.k8sconfig(integrationtest, k8sconfig)\n MM = MetaManifest\n\n for src in [\"\", \"v1\"]:\n # A particular Namespace.\n res, err = k8s.resource(config, MM(src, \"Namespace\", None, \"name\"))\n assert not err\n assert res == K8sResource(\n apiVersion=\"v1\",\n kind=\"Namespace\",\n name=\"namespaces\",\n namespaced=False,\n url=f\"{config.url}/api/v1/namespaces/name\",\n )\n\n # A particular Namespace in a particular namespace -> Invalid.\n assert k8s.resource(config, MM(src, \"Namespace\", \"ns\", \"name\")) == (res, err)\n\n # All Namespaces.\n res, err = k8s.resource(config, MM(src, \"Namespace\", None, None))\n assert not err\n assert res == K8sResource(\n apiVersion=\"v1\",\n kind=\"Namespace\",\n name=\"namespaces\",\n namespaced=False,\n url=f\"{config.url}/api/v1/namespaces\",\n )\n\n # Same as above because the \"namespace\" argument is ignored for Namespaces.\n assert k8s.resource(config, MM(src, \"Namespace\", \"name\", \"\")) == (res, err)" ]
[ "0.6505439", "0.5749894", "0.57462305", "0.5733718", "0.5730093", "0.565757", "0.5604151", "0.5550088", "0.5544834", "0.54473704", "0.54402137", "0.54314804", "0.5424065", "0.5399088", "0.5390702", "0.5361906", "0.53527755", "0.5319915", "0.5289782", "0.52817404", "0.5274568", "0.5268837", "0.52671564", "0.5242845", "0.5234903", "0.5232816", "0.522204", "0.5212945", "0.52094436", "0.52061236", "0.5204312", "0.5196023", "0.51803505", "0.5172443", "0.5169334", "0.5160144", "0.51509315", "0.5144639", "0.5124473", "0.5121889", "0.5115947", "0.51155245", "0.5114491", "0.5101776", "0.5098574", "0.5090989", "0.5086015", "0.50681585", "0.5066008", "0.5063273", "0.50502425", "0.5047447", "0.50464386", "0.5041914", "0.50398535", "0.5034393", "0.5030239", "0.5016329", "0.5005658", "0.50022745", "0.49998307", "0.49938563", "0.49929053", "0.49899298", "0.49566424", "0.49428436", "0.4927181", "0.49230433", "0.49228016", "0.49219507", "0.4918144", "0.4918144", "0.4918144", "0.4918144", "0.49175933", "0.49166116", "0.49010724", "0.4898463", "0.48981318", "0.48959774", "0.4889538", "0.4889087", "0.48884854", "0.48846203", "0.48839945", "0.48829386", "0.48799926", "0.48786178", "0.48772627", "0.48768553", "0.4870888", "0.486936", "0.4868006", "0.48675695", "0.48645464", "0.4855191", "0.4851398", "0.48497176", "0.48480508", "0.484711", "0.48432744" ]
0.0
-1
Resource Management allows you to build an organizational structure for resources based on your business requirements. You can use resource directories, folders, accounts, and resource groups to hierarchically organize and manage resources. For more information, see [What is Resource Management?](~~94475~~)
async def modify_resource_group_async( self, request: dds_20151201_models.ModifyResourceGroupRequest, ) -> dds_20151201_models.ModifyResourceGroupResponse: runtime = util_models.RuntimeOptions() return await self.modify_resource_group_with_options_async(request, runtime)
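A minimal sketch of how this async wrapper might be invoked follows. The package paths (alibabacloud_dds20151201, alibabacloud_tea_openapi), the endpoint value, and the ModifyResourceGroupRequest field names (dbinstance_id, resource_group_id, region_id) are assumptions inferred from the naming conventions used elsewhere in this client and are not given in this record; treat every identifier and placeholder value as illustrative rather than authoritative.

import asyncio

from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_dds20151201.client import Client
from alibabacloud_dds20151201 import models as dds_20151201_models


async def main() -> None:
    # Placeholder credentials and endpoint; replace with real values.
    config = open_api_models.Config(
        access_key_id='<access-key-id>',
        access_key_secret='<access-key-secret>',
    )
    config.endpoint = 'mongodb.aliyuncs.com'
    client = Client(config)

    # Field names are assumed to follow the snake_case pattern used by the
    # other request models in this client (e.g. dbinstance_id); verify them
    # against the SDK version you install.
    request = dds_20151201_models.ModifyResourceGroupRequest(
        dbinstance_id='dds-bp1**********',
        resource_group_id='rg-aek***********',
        region_id='cn-hangzhou',
    )
    response = await client.modify_resource_group_async(request)
    print(response.body)


if __name__ == '__main__':
    asyncio.run(main())

Note that the wrapper shown above only constructs a default util_models.RuntimeOptions() and delegates to modify_resource_group_with_options_async, so per-call runtime settings such as timeouts or retries would require calling the *_with_options_async variant directly.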
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
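The objective block above indicates which columns of a record are paired into training objectives; in this record the only entry is a triplet over (query, document, negatives). A hypothetical sketch of flattening one such record into (anchor, positive, negative) triplets according to that spec is shown below; the field semantics are inferred from the structure of this dump and are not documented by it.

from typing import Any, Dict, Iterator, Tuple


def iter_triplets(record: Dict[str, Any]) -> Iterator[Tuple[str, str, str]]:
    # Each entry of objective["triplet"] names the record fields acting as
    # anchor, positive, and list of negatives, e.g. ["query", "document", "negatives"].
    for anchor_key, positive_key, negatives_key in record["metadata"]["objective"]["triplet"]:
        anchor = record[anchor_key]
        positive = record[positive_key]
        for negative in record[negatives_key]:
            yield anchor, positive, negative


# Usage (assuming `row` is one record loaded from this dataset):
#   triplets = list(iter_triplets(row))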
[ "def resources(self):", "def gen_resources(self):\n\n print \"\\t* Adding resources to compute template\"\n\n # add all the nets and subnets\n self.gen_net_resources()\n\n # add all routers\n self.gen_router_resources()\n\n # add all servers/intances\n self.gen_server_resources()", "def resources():\n check_resources()", "def assemble_resource_directories(project, base_dir):\n resource_path = os.path.join(base_dir, project.resources_path)\n os.makedirs(os.path.join(resource_path, 'images'))\n os.makedirs(os.path.join(resource_path, 'fonts'))\n os.makedirs(os.path.join(resource_path, 'data'))", "def resource_prefix(self):", "def resource_mapping():\n return {\n 'OS::Heat::ResourceChain': ResourceChain,\n }", "def resources(self):\n return self.__resources", "def resource_map(self):", "def test_create_namespaced_local_resource_access_review(self):\n pass", "def test_create_namespaced_resource_access_review(self):\n pass", "def ResourcePath(self, name):\n pass", "def _categorize_resource(self, resource: Resource, required_permissions: str) -> None:\n if resource.is_user_provided:\n self.resources_reused.append({\"arn\": resource.arn, \"required_permissions\": required_permissions})\n else:\n self.resources_created.append({\"arn\": resource.arn})", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n auto_devops_enabled: Optional[pulumi.Input[bool]] = None,\n avatar: Optional[pulumi.Input[str]] = None,\n avatar_hash: Optional[pulumi.Input[str]] = None,\n default_branch_protection: Optional[pulumi.Input[int]] = None,\n description: Optional[pulumi.Input[str]] = None,\n emails_disabled: Optional[pulumi.Input[bool]] = None,\n extra_shared_runners_minutes_limit: Optional[pulumi.Input[int]] = None,\n ip_restriction_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n lfs_enabled: Optional[pulumi.Input[bool]] = None,\n membership_lock: Optional[pulumi.Input[bool]] = None,\n mentions_disabled: Optional[pulumi.Input[bool]] = None,\n name: Optional[pulumi.Input[str]] = None,\n parent_id: Optional[pulumi.Input[int]] = None,\n path: Optional[pulumi.Input[str]] = None,\n prevent_forking_outside_group: Optional[pulumi.Input[bool]] = None,\n project_creation_level: Optional[pulumi.Input[str]] = None,\n request_access_enabled: Optional[pulumi.Input[bool]] = None,\n require_two_factor_authentication: Optional[pulumi.Input[bool]] = None,\n share_with_group_lock: Optional[pulumi.Input[bool]] = None,\n shared_runners_minutes_limit: Optional[pulumi.Input[int]] = None,\n subgroup_creation_level: Optional[pulumi.Input[str]] = None,\n two_factor_grace_period: Optional[pulumi.Input[int]] = None,\n visibility_level: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def resource_manager():\n return visa.ResourceManager()", "def testSubResources(self):\n\n def CountResourceTree(resource):\n ret = 0\n for r in resource._resources:\n ret += 1 + CountResourceTree(r)\n return ret\n\n api = self.ApiFromDiscoveryDoc('moderator.v1.json')\n top_level_resources = 0\n total_resources = 0\n non_method_resources = 0\n have_sub_resources = 0\n have_sub_resources_and_methods = 0\n for r in api._resources:\n top_level_resources += 1\n total_resources += 1 + CountResourceTree(r)\n if not r._methods:\n non_method_resources += 1\n if r._resources:\n have_sub_resources += 1\n if r._resources and r._methods:\n have_sub_resources_and_methods += 1\n # Hand counted 18 resources in the file.\n self.assertEquals(18, total_resources)\n self.assertEquals(11, 
top_level_resources)\n # 4 of them have no methods, only sub resources\n self.assertEquals(4, non_method_resources)\n # 6 of them have sub resources.\n self.assertEquals(6, have_sub_resources)\n # And, of course, 2 should have both sub resources and methods\n self.assertEquals(2, have_sub_resources_and_methods)", "def test_create_local_resource_access_review_for_all_namespaces(self):\n pass", "def GetResourceAclSample():\n client = CreateClient()\n for resource in client.GetResources(limit=5).entry:\n acl_feed = client.GetResourceAcl(resource)\n for acl in acl_feed.entry:\n print acl.role.value, acl.scope.type, acl.scope.value", "def test_objectresource_resourcenameforuid(self):\n\n home01 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=\"user01\", create=True)\n self.assertTrue(home01 is not None)\n calendar01 = yield home01.childWithName(\"calendar\")\n yield calendar01.createCalendarObjectWithName(\"1.ics\", Component.fromString(self.caldata1))\n yield calendar01.createCalendarObjectWithName(\"2.ics\", Component.fromString(self.caldata2))\n yield self.commitTransaction(0)\n\n home = yield self._remoteHome(self.theTransactionUnderTest(1), \"user01\")\n self.assertTrue(home is not None)\n calendar = yield home.childWithName(\"calendar\")\n\n name = yield calendar.resourceNameForUID(\"uid1\")\n self.assertEqual(name, \"1.ics\")\n\n name = yield calendar.resourceNameForUID(\"uid2\")\n self.assertEqual(name, \"2.ics\")\n\n name = yield calendar.resourceNameForUID(\"foo\")\n self.assertEqual(name, None)\n\n yield self.commitTransaction(1)", "def test_create_resource_group(self):\n pass", "def create_resource_object():\n\n # Create two objects of different users and same center code\n Resource.objects.create(status=0, title='Recurso de teste (BR1.1)', \n link='http://bvsalud.org', originator='BIREME',\n created_by_id=1, cooperative_center_code='BR1.1')\n \n Resource.objects.create(status=0, title='Recurso de teste (BR1.1)', \n link='http://bvsalud.org', originator='BIREME',\n created_by_id=2, cooperative_center_code='BR1.1')\n\n # Create one object of diffent center code\n Resource.objects.create(status=0, title='Recurso de teste (PY3.1)', \n link='http://bvsalud.org', originator='BIREME',\n created_by_id=3, cooperative_center_code='PY3.1')\n\n\n # add descriptor and thematic area for resource pk 1\n object_ct = ContentType.objects.get_for_model(Resource)\n descriptor = Descriptor.objects.create(object_id=1, content_type=object_ct, text='descritor 1')\n keyword = Keyword.objects.create(object_id=1, content_type=object_ct, text='keyword 1')\n thematic = ResourceThematic.objects.create(object_id=1, content_type=object_ct, thematic_area_id=1)", "def getResources(self, folder):\n\n #-------------------- \n # Get the resource JSON\n #-------------------- \n folder += \"/resources\"\n resources = self.__getJson(folder)\n #print(\"%s %s\"%(, folder))\n #print(\" Got resources: '%s'\"%(str(resources)))\n\n\n\n #-------------------- \n # Filter the JSONs\n #-------------------- \n resourceNames = []\n for r in resources:\n if 'label' in r:\n resourceNames.append(r['label'])\n #print(\"FOUND RESOURCE ('%s') : %s\"%(folder, r['label']))\n elif 'Name' in r:\n resourceNames.append(r['Name'])\n #print(\"FOUND RESOURCE ('%s') : %s\"%(folder, r['Name']))\n\n return resourceNames", "def resources(request):\n projects, secrets, pools, storageclasses, pvcs, pods = ([] for i in range(6))\n\n def finalizer():\n \"\"\"\n Delete the resources created during the test\n \"\"\"\n for resource_type 
in pods, pvcs, storageclasses, secrets:\n for resource in resource_type:\n resource.delete()\n resource.ocp.wait_for_delete(resource.name)\n if pools:\n # Delete only the RBD pool\n pools[0].delete()\n if projects:\n for project in projects:\n project.delete(resource_name=project.namespace)\n project.wait_for_delete(project.namespace)\n\n request.addfinalizer(finalizer)\n\n return projects, secrets, pools, storageclasses, pvcs, pods", "def init_physical_resources():\n test_physical_resources = []\n\n # add info to list in memory, one by one, following signature values\n phys_resrc_ID = 1\n phys_resrc_name = \"small-cavium-1\"\n phys_resrc_info = \"Jump server in Arm pod, 48 cores, 64G RAM, 447G SSD, aarch64 Cavium ThunderX, Ubuntu OS\"\n phys_resrc_IPAddress = \"10.10.50.12\"\n phys_resrc_MACAddress = \"00-14-22-01-23-45\"\n\n test_physical_resources.append(PhysicalResource(phys_resrc_ID, phys_resrc_name,\n phys_resrc_info,\n phys_resrc_IPAddress,\n phys_resrc_MACAddress))\n\n phys_resrc_ID = 2\n phys_resrc_name = \"medium-cavium-1\"\n phys_resrc_info = \"Jump server in New York pod, 96 cores, 64G RAM, 447G SSD, aarch64 Cavium ThunderX, Ubuntu OS\"\n phys_resrc_IPAddress = \"30.31.32.33\"\n phys_resrc_MACAddress = \"0xb3:22:05:c1:aa:82\"\n\n test_physical_resources.append(PhysicalResource(phys_resrc_ID, phys_resrc_name,\n phys_resrc_info,\n phys_resrc_IPAddress,\n phys_resrc_MACAddress))\n\n phys_resrc_ID = 3\n phys_resrc_name = \"mega-cavium-666\"\n phys_resrc_info = \"Jump server in Las Vegas, 1024 cores, 1024G RAM, 6666G SSD, aarch64 Cavium ThunderX, Ubuntu OS\"\n phys_resrc_IPAddress = \"54.53.52.51\"\n phys_resrc_MACAddress = \"01-23-45-67-89-ab\"\n\n test_physical_resources.append(PhysicalResource(phys_resrc_ID, phys_resrc_name,\n phys_resrc_info,\n phys_resrc_IPAddress,\n phys_resrc_MACAddress))\n\n\n # write list to binary file\n write_list_bin(test_physical_resources, FILE_PHYSICAL_RESOURCES)\n\n return test_physical_resources", "def CreateResourceInCollectionSample():\n client = CreateClient()\n col = gdata.docs.data.Resource(type='folder', title='My Sample Folder')\n col = client.CreateResource(col)\n print 'Created collection:', col.title.text, col.resource_id.text\n doc = gdata.docs.data.Resource(type='document', title='My Sample Doc')\n doc = client.CreateResource(doc, collection=col)\n print 'Created:', doc.title.text, doc.resource_id.text", "def test_getResourceRelations(self):\n pass", "def collect_resources(namespace, output_dir, api_resources, k8s_cli_input=\"\", selector=\"\"):\n set_file_logger(output_dir)\n k8s_cli = detect_k8s_cli(k8s_cli_input)\n ns_output_dir = os.path.join(output_dir, namespace)\n make_dir(ns_output_dir)\n collect_api_resources(namespace, ns_output_dir, k8s_cli, api_resources, selector)\n collect_api_resources_description(namespace, ns_output_dir, k8s_cli, api_resources, selector)\n collect_pods_logs(namespace, ns_output_dir, k8s_cli, logs_from_all_pods=True)", "def __init__(__self__, resource_name, opts=None, attributes=None, name=None, parent_id=None, realm_id=None, __props__=None, __name__=None, __opts__=None):\n if __name__ is not None:\n warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning)\n resource_name = __name__\n if __opts__ is not None:\n warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning)\n opts = __opts__\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a 
ResourceOptions instance')\n if opts.version is None:\n opts.version = utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = dict()\n\n __props__['attributes'] = attributes\n __props__['name'] = name\n __props__['parent_id'] = parent_id\n if realm_id is None:\n raise TypeError(\"Missing required property 'realm_id'\")\n __props__['realm_id'] = realm_id\n __props__['path'] = None\n super(Group, __self__).__init__(\n 'keycloak:index/group:Group',\n resource_name,\n __props__,\n opts)", "def create_resources(self) -> List[ResourceDescription]:\r\n return self.resources", "def test_create_resource_access_review_for_all_namespaces(self):\n pass", "def resource(self, *path):\n # TODO(vadimsh): Verify that file exists. Including a case like:\n # module.resource('dir').join('subdir', 'file.py')\n return self._module.RESOURCE_DIRECTORY.join(*path)", "def create_sagemaker_resource(\n resource_plural, resource_name, spec_file, replacements, namespace=\"default\"\n):\n\n reference, spec, resource = k8s.load_and_create_resource(\n resource_directory,\n CRD_GROUP,\n CRD_VERSION,\n resource_plural,\n resource_name,\n spec_file,\n replacements,\n namespace,\n )\n\n return reference, spec, resource", "def __init__(self, basedir=None):\n # ------------------------------------------------------------------------\n super(Resources, self).__init__()\n self.xInitialize(basedir or \"resources\")", "def generate_config(context):\n\n properties = context.properties\n\n base_resource = get_type(context)\n\n resources = []\n\n if 'dependsOn' in properties:\n dependson = {'metadata': {'dependsOn': properties['dependsOn']}}\n dependson_root = properties['dependsOn']\n else:\n dependson = {}\n dependson_root = []\n\n for role in properties['roles']:\n for member in role['members']:\n suffix = sha1(\n '{}-{}'.format(role['role'], member).encode('utf-8')).hexdigest()[:10]\n policy_get_name = '{}-{}'.format(context.env['name'], suffix)\n\n resource_name = '{}-{}'.format(policy_get_name,\n base_resource['postfix'])\n iam_resource = {\n 'name': resource_name,\n # TODO - Virtual type documentation needed\n 'type': base_resource['dm_type'],\n 'properties': {\n base_resource['dm_resource_property']: base_resource['id'],\n 'role': role['role'],\n 'member': member,\n }\n }\n iam_resource.update(dependson)\n resources.append(iam_resource)\n\n dependson = {'metadata': {'dependsOn': [\n resource_name] + dependson_root}}\n\n return {\"resources\": resources}", "def resources(stack, region, profile):\n logging.debug(f'finding resources - stack: {stack}')\n logging.debug(f'region: {region}')\n logging.debug(f'profile: {profile}')\n tool = ResourceTool(\n Stack=stack,\n Region=region,\n Profile=profile,\n Verbose=True\n )\n\n if tool.list_resources():\n sys.exit(0)\n else:\n sys.exit(1)", "def subdir(self):", "def get_recipe_resource():\n return os.getenv(\"DKU_CUSTOM_RESOURCE_FOLDER\")", "def get_resource_path():\n return os.path.join(os.path.dirname(__file__), \"resources\") + os.path.sep", "def resource_path(name):\n return os.path.join(\n os.path.dirname(__file__), 'images', 'resource', name)", "def __init__(__self__,\n resource_name: str,\n args: Optional[AclArgs] = None,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def link_resources(ctx):\n\n for resource in RESOURCES:\n\n command = \"ln -s -r -f -T {res}/{resource} {proj}/{resource}\".format(\n 
res=RESOURCE_DIR,\n proj=PROJECT_DIR,\n resource=resource)\n\n print(\"Running\")\n print(command)\n print(\"-----------------------------\")\n ctx.run(command)", "def format_resource_tree(\n nested_resources, # type: NestedResourceNodes\n db_session, # type: Session\n resources_perms_dict=None, # type: Optional[ResourcePermissionMap]\n permission_type=None, # type: Optional[PermissionType]\n nesting_key=\"children\", # type: NestingKeyType\n): # type: (...) -> JSON\n # optimization to avoid re-lookup of 'allowed permissions' when already fetched\n # unused when parsing 'applied permissions'\n __internal_svc_res_perm_dict = {}\n\n def recursive_fmt_res_tree(nested_dict): # type: (NestedResourceNodes) -> JSON\n fmt_res_tree = {}\n for child_id, child_dict in nested_dict.items():\n resource = child_dict[\"node\"]\n # nested nodes always use 'children' regardless of nested-key\n # nested-key employed in the generated format will indicate the real resource parents/children relationship\n new_nested = child_dict[\"children\"]\n perms = []\n\n # case of pre-specified user/group-specific permissions\n if resources_perms_dict is not None:\n if resource.resource_id in resources_perms_dict.keys():\n perms = resources_perms_dict[resource.resource_id]\n\n # case of full fetch (allowed resource permissions)\n else:\n # directly access the resource if it is a service\n service = None # type: Optional[Service]\n if resource.root_service_id is None:\n service = resource\n service_id = resource.resource_id\n # obtain corresponding top-level service resource if not already available,\n # get resource permissions allowed under the top service's scope\n else:\n service_id = resource.root_service_id\n if service_id not in __internal_svc_res_perm_dict:\n service = ResourceService.by_resource_id(service_id, db_session=db_session)\n # add to dict only if not already added\n if service is not None and service_id not in __internal_svc_res_perm_dict:\n __internal_svc_res_perm_dict[service_id] = {\n res_type.resource_type_name: res_perms # use str key to match below 'resource_type' field\n for res_type, res_perms in SERVICE_TYPE_DICT[service.type].resource_types_permissions.items()\n }\n # in case of inverse nesting, service could be at \"bottom\"\n # retrieve its permissions directly since its type is never expected nested under itself\n res_type_name = resource.resource_type # type: Str\n if res_type_name == \"service\":\n perms = SERVICE_TYPE_DICT[service.type].permissions\n else:\n perms = __internal_svc_res_perm_dict[service_id][resource.resource_type]\n\n fmt_res_tree[child_id] = format_resource(resource, perms, permission_type)\n fmt_res_tree[child_id][nesting_key] = recursive_fmt_res_tree(new_nested)\n return fmt_res_tree\n\n return recursive_fmt_res_tree(nested_resources)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n account_id: Optional[pulumi.Input[str]] = None,\n group: Optional[pulumi.Input[str]] = None,\n image_id: Optional[pulumi.Input[str]] = None,\n organization_arn: Optional[pulumi.Input[str]] = None,\n organizational_unit_arn: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def assemble_resources(base_dir, resource_path, resources, type_restrictions=None):\n for f in resources:\n if type_restrictions and f.kind not in type_restrictions:\n continue\n target_dir = os.path.abspath(os.path.join(base_dir, resource_path, ResourceFile.DIR_MAP[f.kind]))\n f.copy_all_variants_to_dir(target_dir)", "def 
create_resource_config_files(host_config, resource_config, type_map, bus_map, trecs_root_dir, output_dir, resource_config_dir, model_listen_port, agent_listen_port):\n for host in host_config:\n if host['host_type'] != 'RA':\n continue\n\n resource_name = host['attached_resource_name']\n\n init_data = {\n 'RA': {\n 'ip': '127.0.0.1',\n 'listen_port': agent_listen_port\n },\n 'bus_index': bus_map[resource_name],\n 'listen_port': model_listen_port,\n 'log_path': path.join(output_dir, 'csv', '{}.csv'.format(resource_name))\n }\n\n resource = next(resource for resource in resource_config['resources'] if resource['resource_name'] == resource_name)\n for key in resource.keys():\n if key.endswith('_path'):\n cwd = getcwd()\n chdir(resource_config_dir)\n resource[key] = path.abspath(resource[key])\n chdir(cwd)\n\n final_config = init_data.copy()\n final_config.update(resource)\n\n config_file_name = '{}_config.json'.format(resource_name)\n with open(\n path.join(trecs_root_dir, 'run', config_file_name), 'w'\n ) as init_file:\n dump(final_config, init_file)", "def test_resource_path(self):\n\n # Without arguments\n resources_root_path = os.path.abspath(os.path.join(\n MY_DIRECTORY, '..', '..', 'resources'\n ))\n self.assertEqual(resources_root_path, paths.resource())", "def resources(self, resources):\n self._resources = resources", "def load(self):\n self.suite.load()\n self.resource_map = {}\n dirlist = os.listdir(self.resources)\n for resource_name in (name for name in dirlist\n if os.path.isfile(os.path.join(self.resources,name)) and\n os.path.splitext(name)[1].lower() == '.fbr'):\n try:\n f = open(os.path.join(self.resources,resource_name),'rU')\n expr = f.read()\n d = eval(expr)\n resource_id = os.path.splitext(resource_name)[0].lower()\n d['id'] = resource_id\n kind = d['kind']\n del d['kind']\n self.resource_map[resource_id] = Resource.create(kind,**d)\n finally:\n f.close()", "def get_resource_dir(cls) -> str:\n return os.path.join(\n os.path.realpath(os.path.dirname(__file__)),\n os.pardir,\n os.pardir,\n os.pardir,\n \"gem5\",\n \"resources\",\n )", "def test_get_deployment_resources(self):\n pass", "def is_reserved_resource(self, work_dir: str, resource: str) -> bool:\n resource_dir = resource.split(\"/\")[0] if \"/\" in resource else resource\n if resource.startswith(\".resumables-\") and resource.endswith(\".db\"):\n logging.error(f\"resumable dbs not accessible {resource}\")\n return True\n elif re.match(r\"(.+)\\.([a-f\\d0-9-]{32,36})$\", resource):\n logging.error(\"merged resumable files not accessible\")\n return True\n elif re.match(r\"(.+).([a-f\\d0-9-]{32,36}).part$\", resource):\n logging.error(\"partial upload files not accessible\")\n return True\n elif VALID_UUID.match(resource_dir):\n potential_target = os.path.normpath(f\"{work_dir}/{resource_dir}\")\n if os.path.lexists(potential_target) and os.path.isdir(potential_target):\n content = os.listdir(potential_target)\n for entry in content:\n if re.match(r\"(.+).chunk.[0-9]+$\", entry):\n logging.error(f\"resumable directories not accessible {entry}\")\n return True\n return False", "def resource(self, n):\n\n cfg = self.read()\n\n for res in cfg.get('Resources', []):\n res_name = res.get('Resource')\n\n if res_name == n:\n return ConfigResource(res)", "def resources(self):\n return [self]", "def test_access_resource(self):\n test_resource = ResourceTypeName.get()\n role_name = 'test_role'\n resp = self.app.post(f'/v1/resource/{test_resource}', data=json.dumps({'actions': ['tr:action1']}),\n headers=admin_headers)\n 
self.assertEqual(resp.status_code, 201)\n with self.subTest(\"Permission is denied\"):\n resp = self.app.get(f'/v1/resource/{test_resource}', headers=user_header)\n self.assertEqual(resp.status_code, 403)\n\n role_request_body = {\n \"role_id\": role_name,\n \"policy\": {\n 'Statement': [{\n 'Sid': role_name,\n 'Action': [\n \"fus:DeleteResources\",\n \"fus:GetResources\"],\n 'Effect': 'Allow',\n 'Resource': [f\"arn:hca:fus:*:*:resource/{test_resource}\"]\n }]\n }\n }\n resp = self.app.post(f'/v1/role', data=json.dumps(role_request_body), headers=admin_headers)\n self.assertEqual(resp.status_code, 201)\n resp = self.app.put(f\"/v1/user/{service_accounts['user']['client_email']}/roles?action=add\",\n data=json.dumps({'roles': [role_name]}),\n headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n\n with self.subTest(\"Permission is granted\"):\n resp = self.app.get(f'/v1/resource/{test_resource}', headers=user_header)\n self.assertEqual(resp.status_code, 200)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n label: Optional[pulumi.Input[str]] = None,\n permissions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n __props__=None):\n ...", "def register_resources(self):\n raise NotImplementedError", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n domain_id: Optional[pulumi.Input[str]] = None,\n group_id: Optional[pulumi.Input[str]] = None,\n project_id: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n role_id: Optional[pulumi.Input[str]] = None,\n user_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def register_dcc_resource_path(resources_path):\n\n pass", "def set_resources():\n global available_resources\n global EdgenodeResources\n recv_json = request.get_json()\n for resourcename, value in recv_json.items():\n available_resources[resourcename] = value\n # TODO make this better\n EdgenodeResources = [TaskResources(ram=int(available_resources['RAM']), cpu=int(\n available_resources['CPU']), hdd=int(available_resources['HDD'])), available_resources['DATA']]\n\n print 'Available resources set to', EdgenodeResources\n return 'Available resources set to ' + str(available_resources)", "def resourceManager(*args, nameFilter: AnyStr=\"\", saveAs: List[AnyStr, AnyStr]=None,\n **kwargs)->None:\n pass", "def resources(filename):\n return send_from_directory(\"resources\", filename)", "def GetAllResourcesSample():\n client = CreateClient()\n # Unlike client.GetResources, this returns a list of resources\n for resource in client.GetAllResources():\n PrintResource(resource)", "def get_recipe_resource():\n return os.getenv(\"SKU_CUSTOM_RECIPE_RESOURCE_FOLDER\")", "def add_resources(event):\n anuket_resources.need()", "def resources(self) -> HTMLBody:\n\t\treturn render_template(\"resources.jinja2\")", "def get_resources(self, resource_data=None):\n if not resource_data and self.component:\n resource_data = self.component.get_resource_data()\n\n resources = []\n for resource in self.files:\n resource.update(resource_data)\n\n resource['storage_path'] = self.prefix + '/' + resource['name']\n relative_path = self.relative_path(data=resource)\n resource['relative_path'] = relative_path\n resource['url'] = resource['url'] + '/' + relative_path\n resources.append(resource)\n return resources", "def test_create_cluster_resource_quota(self):\n pass", "def __init__(__self__,\n 
resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n etag: Optional[pulumi.Input[str]] = None,\n file_shares: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FileShareConfigArgs']]]]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n kms_key_name: Optional[pulumi.Input[str]] = None,\n labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n location: Optional[pulumi.Input[str]] = None,\n networks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NetworkConfigArgs']]]]] = None,\n project: Optional[pulumi.Input[str]] = None,\n tier: Optional[pulumi.Input['InstanceTier']] = None,\n __props__=None):\n ...", "def gen_router_resources(self):\n\n print \"\\t* Adding router resources to compute template\"\n\n from nova import version\n year = version.version_string()\n\n for idx, router in enumerate(self.tenant_routers):\n router_ports = []\n for port in self.all_ports:\n if router[\"id\"] == port[\"device_id\"]:\n router_ports.append(port)\n\n # add the router definition\n if \"2013\" in year:\n # Havana Format\n data = {\"type\": \"OS::Neutron::Router\"}\n self.compute_data[\"resources\"][\"router%s\" % str(idx)] = data\n\n # routers without external gateway\n if router[\"external_gateway_info\"] is not None:\n\n name = {\"get_resource\": \"router%s\" % str(idx)}\n netid = {\"get_param\": \"public_net_%s\" % str(idx)}\n\n # add the router gateway\n data = {\"type\": \"OS::Neutron::RouterGateway\",\n \"properties\": {\n \"router_id\": name,\n \"network_id\": netid\n }}\n\n self.compute_data[\"resources\"][\"router_gateway%s\" % str(idx)] = data\n\n else:\n # Icehouse Format\n rtrName = router[\"name\"]\n # routers without external gateway\n if router[\"external_gateway_info\"] is not None:\n data = {\"type\": \"OS::Neutron::Router\",\n \"properties\": {\n \"name\": rtrName,\n \"external_gateway_info\": {\n \"network\": {\n \"get_param\": \"public_net_%s\" % str(idx)\n }\n }\n }}\n else:\n data = {\"type\": \"OS::Neutron::Router\",\n \"properties\": {\n \"name\": rtrName\n }\n }\n self.compute_data[\"resources\"][\"router%s\" % str(idx)] = data\n\n # internal port information needed\n internal_interfaces = filter(lambda port: port[\"device_owner\"] == \"network:router_interface\", router_ports)\n\n for idxs, interface in enumerate(internal_interfaces):\n # add the router interface\n\n for fixedip in interface[\"fixed_ips\"]:\n\n # create router interface\n data = {\"type\": \"OS::Neutron::RouterInterface\",\n \"properties\": {\n \"router_id\": {\"get_resource\": \"router%s\" % str(idx)},\n \"port_id\": {\"get_resource\": \"port_%s_%s\" % (str(idx), str(idxs))}\n }}\n self.compute_data[\"resources\"][\"router_interface%s_%s\" % (str(idx), str(idxs))] = data\n\n # create router port\n network = self.neutronclient.show_subnet(fixedip[\"subnet_id\"])[\"subnet\"][\"network_id\"]\n net_name = \"%s\" % str(self.neutronclient.show_network(network)[\"network\"][\"name\"])\n net_id = self.neutronclient.show_network(network)[\"network\"][\"id\"]\n\n fixed_ips = [{\"ip_address\": fixedip[\"ip_address\"]}]\n net = self.neutronclient.show_network(network)[\"network\"]\n if net[\"shared\"] is True:\n data = {\"type\": \"OS::Neutron::Port\",\n \"properties\": {\n \"fixed_ips\": fixed_ips,\n \"network_id\": net_id\n }}\n else:\n data = {\"type\": \"OS::Neutron::Port\",\n \"properties\": {\n \"fixed_ips\": fixed_ips,\n \"network_id\": {\"get_resource\": net_name}\n }}\n 
self.compute_data[\"resources\"][\"port_%s_%s\" % (str(idx), str(idxs))] = data", "def test_sm_resource_object(self):\n\n # Add a faked storage_group\n faked_storage_group = self.add_storage_group1()\n storage_group_oid = faked_storage_group.oid\n\n storage_group_mgr = self.console.storage_groups\n\n # Execute the code to be tested\n storage_group = storage_group_mgr.resource_object(storage_group_oid)\n\n storage_group_uri = \"/api/storage-groups/\" + storage_group_oid\n\n sv_mgr = storage_group.storage_volumes\n vsr_mgr = storage_group.virtual_storage_resources\n\n assert isinstance(storage_group, StorageGroup)\n assert isinstance(sv_mgr, StorageVolumeManager)\n assert isinstance(vsr_mgr, VirtualStorageResourceManager)\n\n sg_cpc = storage_group.cpc\n assert isinstance(sg_cpc, Cpc)\n assert sg_cpc.uri == storage_group.properties['cpc-uri']\n\n # Note: Properties inherited from BaseResource are tested there,\n # but we test them again:\n assert storage_group.properties['object-uri'] == storage_group_uri\n assert storage_group.properties['object-id'] == storage_group_oid\n assert storage_group.properties['class'] == 'storage-group'\n assert storage_group.properties['parent'] == self.console.uri", "def set_paths(self, specs, resources):\n self.install = 'install.xml'\n self.specs_path = path_format(specs)\n self.root = path_format(dirname(dirname(self.specs_path)) + '/')\n self.res_path = path_format(resources)\n self.resources['BASE'] = self.res_path\n self.specs['BASE'] = self.specs_path", "def write_resources(self, resources):\n for filename, data in list(resources.get('outputs', {}).items()):\n # Determine where to write the file to\n dest = os.path.join(self.output_dir, filename)\n path = os.path.dirname(dest)\n if path and not os.path.isdir(path):\n os.makedirs(path)\n\n # Write file\n with open(dest, 'wb') as f:\n f.write(data)", "def resources(self, resources):\n\n self._resources = resources", "def resources(self, resources):\n\n self._resources = resources", "def resources(self, resources):\n\n self._resources = resources", "def resources(self, resources):\n\n self._resources = resources", "def CreateResources(self, manifests, region):\n resource_dict = manifest_util.ParseDeployConfig(self.messages, manifests,\n region)\n msg_template = 'Created Cloud Deploy resource: {}.'\n # Create delivery pipeline first.\n # In case user has both types of pipeline definition in the same\n # config file.\n pipelines = resource_dict[manifest_util.DELIVERY_PIPELINE_KIND_V1BETA1]\n if pipelines:\n operation_dict = {}\n for resource in pipelines:\n operation_dict[resource.name] = self.CreateDeliveryPipeline(resource)\n self.operation_client.CheckOperationStatus(operation_dict, msg_template)\n # In case user has both types of target definition in the same\n # config file.\n targets = resource_dict[manifest_util.TARGET_KIND_V1BETA1]\n if targets:\n operation_dict = {}\n for resource in targets:\n operation_dict[resource.name] = target_util.PatchTarget(resource)\n self.operation_client.CheckOperationStatus(operation_dict, msg_template)\n # Create automation resource.\n automations = resource_dict[manifest_util.AUTOMATION_KIND]\n operation_dict = {}\n for resource in automations:\n operation_dict[resource.name] = automation_util.PatchAutomation(resource)\n self.operation_client.CheckOperationStatus(operation_dict, msg_template)\n # Create custom target type resource.\n custom_target_types = resource_dict[manifest_util.CUSTOM_TARGET_TYPE_KIND]\n operation_dict = {}\n for resource in 
custom_target_types:\n operation_dict[resource.name] = (\n custom_target_type_util.PatchCustomTargetType(resource)\n )\n self.operation_client.CheckOperationStatus(operation_dict, msg_template)", "def getResource(self):\n pass;", "def test_objectresource_countobjects(self):\n\n home01 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=\"user01\", create=True)\n self.assertTrue(home01 is not None)\n calendar01 = yield home01.childWithName(\"calendar\")\n yield calendar01.createCalendarObjectWithName(\"1.ics\", Component.fromString(self.caldata1))\n yield calendar01.createCalendarObjectWithName(\"2.ics\", Component.fromString(self.caldata2))\n yield self.commitTransaction(0)\n\n home = yield self._remoteHome(self.theTransactionUnderTest(1), \"user01\")\n self.assertTrue(home is not None)\n calendar = yield home.childWithName(\"calendar\")\n count = yield calendar.countObjectResources()\n self.assertEqual(count, 2)\n yield self.commitTransaction(1)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n application_insights_id: Optional[pulumi.Input[str]] = None,\n container_registry_id: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n encryption: Optional[pulumi.Input[pulumi.InputType['WorkspaceEncryptionArgs']]] = None,\n friendly_name: Optional[pulumi.Input[str]] = None,\n high_business_impact: Optional[pulumi.Input[bool]] = None,\n identity: Optional[pulumi.Input[pulumi.InputType['WorkspaceIdentityArgs']]] = None,\n image_build_compute_name: Optional[pulumi.Input[str]] = None,\n key_vault_id: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n primary_user_assigned_identity: Optional[pulumi.Input[str]] = None,\n public_access_behind_virtual_network_enabled: Optional[pulumi.Input[bool]] = None,\n public_network_access_enabled: Optional[pulumi.Input[bool]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n sku_name: Optional[pulumi.Input[str]] = None,\n storage_account_id: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n v1_legacy_mode_enabled: Optional[pulumi.Input[bool]] = None,\n __props__=None):\n ...", "def extract_resources(self, resources, collector, cwd=None):\n raise NotImplementedError(\"not implemented\")", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n kind: Optional[pulumi.Input[Union[str, 'Kind']]] = None,\n location: Optional[pulumi.Input[str]] = None,\n managed_network_group_name: Optional[pulumi.Input[str]] = None,\n managed_network_name: Optional[pulumi.Input[str]] = None,\n management_groups: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceIdArgs']]]]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n subnets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceIdArgs']]]]] = None,\n subscriptions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceIdArgs']]]]] = None,\n virtual_networks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceIdArgs']]]]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n access_configuration_name: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n directory_id: Optional[pulumi.Input[str]] = None,\n 
force_remove_permission_policies: Optional[pulumi.Input[bool]] = None,\n permission_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AccessConfigurationPermissionPolicyArgs']]]]] = None,\n relay_state: Optional[pulumi.Input[str]] = None,\n session_duration: Optional[pulumi.Input[int]] = None,\n __props__=None):\n ...", "def build_resource(self, *args, **kwargs):\r\n r = {}\r\n for current_resource in self.resources:\r\n item = self._get_resource(\r\n repo=self.current_repo, owner=self.owner, \r\n resource=current_resource, **kwargs\r\n )\r\n if not item: continue\r\n r[current_resource] = item\r\n\r\n return r", "def resource_group(self) -> str:\n return pulumi.get(self, \"resource_group\")", "def CreateCollectionSample():\n client = CreateClient()\n col = gdata.docs.data.Resource(type='folder', title='My Sample Folder')\n col = client.CreateResource(col)\n print 'Created collection:', col.title.text, col.resource_id.text", "def __init__(self, owner, resourceFile):\n self.checksum = Path(resourceFile).md5 # Just use the path name as a unique ID\n _Resource.__init__(self, owner, resourceFile)\n if self._idevice:\n self._idevice.userResources.append(self)", "def test_objectresource_objectwith(self):\n\n home01 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=\"user01\", create=True)\n self.assertTrue(home01 is not None)\n calendar01 = yield home01.childWithName(\"calendar\")\n resource01 = yield calendar01.createCalendarObjectWithName(\"1.ics\", Component.fromString(self.caldata1))\n yield calendar01.createCalendarObjectWithName(\"2.ics\", Component.fromString(self.caldata2))\n yield self.commitTransaction(0)\n\n home = yield self._remoteHome(self.theTransactionUnderTest(1), \"user01\")\n self.assertTrue(home is not None)\n calendar = yield home.childWithName(\"calendar\")\n\n resource = yield calendar.objectResourceWithName(\"2.ics\")\n self.assertEqual(resource.name(), \"2.ics\")\n\n resource = yield calendar.objectResourceWithName(\"foo.ics\")\n self.assertEqual(resource, None)\n\n resource = yield calendar.objectResourceWithUID(\"uid1\")\n self.assertEqual(resource.name(), \"1.ics\")\n\n resource = yield calendar.objectResourceWithUID(\"foo\")\n self.assertEqual(resource, None)\n\n resource = yield calendar.objectResourceWithID(resource01.id())\n self.assertEqual(resource.name(), \"1.ics\")\n\n resource = yield calendar.objectResourceWithID(12345)\n self.assertEqual(resource, None)\n\n yield self.commitTransaction(1)", "def validate_resources(self, folder, resources):\r\n self.validate_files_exist(folder, resources)\r\n self.validate_no_duplicate_paths(resources)", "def install_private_resources(context):\n pass", "def create_resources(self, pool=True, job=True, storage=True):\n\n if pool:\n self.controller.create_pool(self.info)\n self.logger.info(\"Pool of the mission %s created.\", self.info.name)\n\n if job:\n self.controller.create_job(self.info)\n self.logger.info(\"Job of the mission %s created.\", self.info.name)\n\n if storage:\n self.controller.create_storage_container(self.info)\n self.controller.get_storage_container_access_tokens(self.info)\n self.logger.info(\"Storage of the mission %s created.\", self.info.name)\n\n self.logger.info(\"Resources of the mission %s created.\", self.info.name)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n bucket: Optional[pulumi.Input[str]] = None,\n default_acl: Optional[pulumi.Input[str]] = None,\n predefined_acl: Optional[pulumi.Input[str]] = 
None,\n role_entities: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n __props__=None):\n ...", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n auto_devops_enabled: Optional[pulumi.Input[bool]] = None,\n avatar: Optional[pulumi.Input[str]] = None,\n avatar_hash: Optional[pulumi.Input[str]] = None,\n avatar_url: Optional[pulumi.Input[str]] = None,\n default_branch_protection: Optional[pulumi.Input[int]] = None,\n description: Optional[pulumi.Input[str]] = None,\n emails_disabled: Optional[pulumi.Input[bool]] = None,\n extra_shared_runners_minutes_limit: Optional[pulumi.Input[int]] = None,\n full_name: Optional[pulumi.Input[str]] = None,\n full_path: Optional[pulumi.Input[str]] = None,\n ip_restriction_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n lfs_enabled: Optional[pulumi.Input[bool]] = None,\n membership_lock: Optional[pulumi.Input[bool]] = None,\n mentions_disabled: Optional[pulumi.Input[bool]] = None,\n name: Optional[pulumi.Input[str]] = None,\n parent_id: Optional[pulumi.Input[int]] = None,\n path: Optional[pulumi.Input[str]] = None,\n prevent_forking_outside_group: Optional[pulumi.Input[bool]] = None,\n project_creation_level: Optional[pulumi.Input[str]] = None,\n request_access_enabled: Optional[pulumi.Input[bool]] = None,\n require_two_factor_authentication: Optional[pulumi.Input[bool]] = None,\n runners_token: Optional[pulumi.Input[str]] = None,\n share_with_group_lock: Optional[pulumi.Input[bool]] = None,\n shared_runners_minutes_limit: Optional[pulumi.Input[int]] = None,\n subgroup_creation_level: Optional[pulumi.Input[str]] = None,\n two_factor_grace_period: Optional[pulumi.Input[int]] = None,\n visibility_level: Optional[pulumi.Input[str]] = None,\n web_url: Optional[pulumi.Input[str]] = None) -> 'Group':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _GroupState.__new__(_GroupState)\n\n __props__.__dict__[\"auto_devops_enabled\"] = auto_devops_enabled\n __props__.__dict__[\"avatar\"] = avatar\n __props__.__dict__[\"avatar_hash\"] = avatar_hash\n __props__.__dict__[\"avatar_url\"] = avatar_url\n __props__.__dict__[\"default_branch_protection\"] = default_branch_protection\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"emails_disabled\"] = emails_disabled\n __props__.__dict__[\"extra_shared_runners_minutes_limit\"] = extra_shared_runners_minutes_limit\n __props__.__dict__[\"full_name\"] = full_name\n __props__.__dict__[\"full_path\"] = full_path\n __props__.__dict__[\"ip_restriction_ranges\"] = ip_restriction_ranges\n __props__.__dict__[\"lfs_enabled\"] = lfs_enabled\n __props__.__dict__[\"membership_lock\"] = membership_lock\n __props__.__dict__[\"mentions_disabled\"] = mentions_disabled\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"parent_id\"] = parent_id\n __props__.__dict__[\"path\"] = path\n __props__.__dict__[\"prevent_forking_outside_group\"] = prevent_forking_outside_group\n __props__.__dict__[\"project_creation_level\"] = project_creation_level\n __props__.__dict__[\"request_access_enabled\"] = request_access_enabled\n __props__.__dict__[\"require_two_factor_authentication\"] = require_two_factor_authentication\n __props__.__dict__[\"runners_token\"] = runners_token\n __props__.__dict__[\"share_with_group_lock\"] = share_with_group_lock\n __props__.__dict__[\"shared_runners_minutes_limit\"] = shared_runners_minutes_limit\n __props__.__dict__[\"subgroup_creation_level\"] = 
subgroup_creation_level\n __props__.__dict__[\"two_factor_grace_period\"] = two_factor_grace_period\n __props__.__dict__[\"visibility_level\"] = visibility_level\n __props__.__dict__[\"web_url\"] = web_url\n return Group(resource_name, opts=opts, __props__=__props__)", "def test_check_resource(self):\n s1 = System()\n b1 = Books(\"1984\", \"George Orwell\", \"Harvill Secker\", \"1949\", \"0123456789123\")\n self.assertEqual(s1.check_resource(b1), False)\n s1.add_resource(b1)\n self.assertEqual(s1.check_resource(b1), True)", "def createResourceSims(self):\n if self.game.myEmpire['viewResources'] == 0:\n return\n import anwp.sims\n # remove old sims if any\n self.removeResourceSims()\n # create resource sims\n self.resourceSims = []\n for systemID, systemDict in self.game.allSystems.iteritems():\n if systemDict['myEmpireID'] == self.game.myEmpireID:\n # create resource sims representing resources on system\n i = 0\n for attr in ['AL', 'EC', 'IA']:\n if systemDict[attr] > 0:\n # system produces this resource create sim\n name = string.lower(attr[-2:])\n imageFileName = '%smini_%s.png' % (self.game.app.genImagePath, name)\n \n # create sim\n sim = ResourceEntity(self, anwp.sims.categories.StaticCategory(imageFileName, 'resource'))\n \n # add sim to world\n self.resourceSims.append(sim)\n x = systemDict['x'] - 15\n y = systemDict['y'] - 45 - 20*i\n facing = 0\n speed = 0\n force = 1\n self.world.addToWorld(sim, x, y, facing, speed, force)\n i += 1\n \n # create resource sims representing resources being generated\n i = 0\n for attr in ['prodAL', 'prodEC', 'prodIA', 'prodCR']:\n if systemDict[attr] > 0:\n # system produces this resource create sim\n name = string.lower(attr[-2:])\n imageFileName = '%smini_%s_gen.png' % (self.game.app.genImagePath, name)\n \n # create sim\n sim = ResourceEntity(self, anwp.sims.categories.StaticCategory(imageFileName, 'resource'))\n \n # add sim to world\n self.resourceSims.append(sim)\n x = systemDict['x'] + 15\n y = systemDict['y'] - 45 - 20*i\n facing = 0\n speed = 0\n force = 1\n self.world.addToWorld(sim, x, y, facing, speed, force)\n i += 1", "def test_resource_combinations_rpc(\n self, ns_resource_factory, bucket_factory, platform1, platform2\n ):\n # Create the namespace resources and verify health\n ns_resource_name1 = ns_resource_factory(platform=platform1)[1]\n ns_resource_name2 = ns_resource_factory(platform=platform2)[1]\n\n # Create the namespace bucket on top of the namespace resource\n bucket_factory(\n amount=1,\n interface=\"mcg-namespace\",\n write_ns_resource=ns_resource_name1,\n read_ns_resources=[ns_resource_name1, ns_resource_name2],\n )", "def __init__(__self__,\n resource_name: str,\n args: GroupArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def MakeResource(resource_list, output_list=None):\n content = {'resources': resource_list}\n if output_list:\n content['outputs'] = output_list\n return yaml.dump(content)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n app_id: Optional[pulumi.Input[str]] = None,\n index: Optional[pulumi.Input[str]] = None,\n master: Optional[pulumi.Input[str]] = None,\n pattern: Optional[pulumi.Input[str]] = None,\n permissions: Optional[pulumi.Input[str]] = None,\n required: Optional[pulumi.Input[bool]] = None,\n title: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n user_type: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def resourceid(self):", "def resources(ctx, job, gpu):\n\n def 
get_experiment_resources():\n try:\n message_handler = Printer.gpu_resources if gpu else Printer.resources\n PolyaxonClient().experiment.resources(\n user, project_name, _experiment, message_handler=message_handler)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not get resources for experiment `{}`.'.format(_experiment))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n def get_experiment_job_resources():\n try:\n message_handler = Printer.gpu_resources if gpu else Printer.resources\n PolyaxonClient().experiment_job.resources(user,\n project_name,\n _experiment,\n _job,\n message_handler=message_handler)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not get resources for job `{}`.'.format(_job))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),\n ctx.obj.get('experiment'))\n\n if job:\n _job = get_experiment_job_or_local(job)\n get_experiment_job_resources()\n else:\n get_experiment_resources()", "def resources(self):\n return self._resources" ]
[ "0.65045244", "0.57495797", "0.57457536", "0.57339305", "0.57302135", "0.56577545", "0.5604689", "0.5548824", "0.5543633", "0.544657", "0.5439849", "0.5431293", "0.54241556", "0.5397201", "0.5390905", "0.5361256", "0.53534734", "0.5320252", "0.5290149", "0.52813005", "0.52755415", "0.52686965", "0.52658516", "0.5242054", "0.5235643", "0.52325654", "0.52225107", "0.52129996", "0.5209162", "0.5205705", "0.52028584", "0.5195101", "0.51806843", "0.5173122", "0.5168898", "0.5160744", "0.5151648", "0.5144692", "0.5123902", "0.5120821", "0.5116424", "0.511554", "0.51138335", "0.5101415", "0.50984716", "0.5089887", "0.5084148", "0.50690097", "0.5066449", "0.50639004", "0.50496423", "0.5048084", "0.5046116", "0.5041139", "0.5039381", "0.50348955", "0.50300485", "0.50154305", "0.5004402", "0.5001564", "0.5000474", "0.4994533", "0.49927565", "0.4989946", "0.4956565", "0.49423444", "0.4926046", "0.49230877", "0.49218813", "0.49201185", "0.4917506", "0.49170855", "0.49170855", "0.49170855", "0.49170855", "0.49163485", "0.49010316", "0.48984003", "0.48981312", "0.48948938", "0.48889622", "0.48886618", "0.4888369", "0.488586", "0.4883666", "0.4882168", "0.48798174", "0.4878542", "0.4876623", "0.4876124", "0.48707372", "0.48695785", "0.48671335", "0.4866816", "0.48632294", "0.4855181", "0.48504004", "0.4848696", "0.48483312", "0.48473904", "0.4842439" ]
0.0
-1
> For a sharded cluster instance, the bound ECS security group takes effect only for mongos nodes.
def modify_security_group_configuration_with_options( self, request: dds_20151201_models.ModifySecurityGroupConfigurationRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.ModifySecurityGroupConfigurationResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.security_group_id): query['SecurityGroupId'] = request.security_group_id if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='ModifySecurityGroupConfiguration', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.ModifySecurityGroupConfigurationResponse(), self.call_api(params, req, runtime) )
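A minimal usage sketch for the method above, assuming the generated alibabacloud_dds20151201 Python SDK (Client, request model, and runtime classes) is installed; the endpoint, credentials, instance ID, and security group ID below are placeholders, not values taken from this row:

# Hypothetical call of ModifySecurityGroupConfiguration via the generated DDS client.
# All identifiers and credentials are placeholders (assumptions for illustration only).
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util import models as util_models
from alibabacloud_dds20151201.client import Client
from alibabacloud_dds20151201 import models as dds_20151201_models

# Configure the OpenAPI client (endpoint format assumed for the DDS service).
config = open_api_models.Config(
    access_key_id='<your-access-key-id>',          # placeholder credential
    access_key_secret='<your-access-key-secret>',  # placeholder credential
    endpoint='mongodb.aliyuncs.com',               # assumed service endpoint
)
client = Client(config)

# Bind an ECS security group to an ApsaraDB for MongoDB instance.
# For a sharded cluster instance, the group applies only to the mongos nodes.
request = dds_20151201_models.ModifySecurityGroupConfigurationRequest(
    dbinstance_id='dds-bp1xxxxxxxxxxxxx',      # placeholder instance ID
    security_group_id='sg-bp1xxxxxxxxxxxxx',   # placeholder ECS security group ID
)
runtime = util_models.RuntimeOptions()
response = client.modify_security_group_configuration_with_options(request, runtime)
print(response.body)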
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def modify_rds_security_group(payload):\n # version = payload.get(\"version\")\n rds_ids = payload.pop(\"resource_id\")\n sg_ids = payload.pop(\"sg_id\")\n apply_action = \"GrantSecurityGroup\"\n remove_action = \"RemoveSecurityGroup\"\n check_instance_security_action = \"DescribeSecurityGroupByInstance\"\n version = payload.get(\"version\")\n result_data = {}\n\n succ, resp = get_ha_rds_backend_instance_info(payload)\n if not succ:\n return resp\n rds_2_instance = {rds: instance for instance, rds in resp.items()}\n\n if len(sg_ids) > 1:\n return console_response(\n SecurityErrorCode.ONE_SECURITY_PER_INSTANCE_ERROR, \"modify failed\")\n sg_id = sg_ids[0]\n\n code = 0\n msg = 'Success'\n for rds_id in rds_ids:\n sg_results_succ = []\n sg = RdsSecurityGroupModel.get_security_by_id(sg_id=sg_id)\n sg_uuid = sg.uuid\n visible_rds_record = RdsModel.get_rds_by_id(rds_id=rds_id)\n if visible_rds_record.rds_type == 'ha':\n rds_group = visible_rds_record.rds_group\n rds_records = RdsModel.get_rds_records_by_group(rds_group)\n else:\n rds_records = []\n for rds_record in rds_records:\n rds_ins_uuid = rds_2_instance.get(rds_record.uuid)\n\n payload.update(\n {\"action\": check_instance_security_action, \"version\": version,\n \"server\": rds_ins_uuid})\n # check_resp = api.get(payload=payload, timeout=10)\n check_resp = api.get(payload=payload)\n if check_resp.get(\"code\") != 0:\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = check_resp.get(\"msg\")\n continue\n\n # if the instance already has a security group, remove it\n if check_resp[\"data\"][\"total_count\"] > 0:\n old_sg_uuid = check_resp[\"data\"][\"ret_set\"][0][\"id\"]\n payload.update({\"action\": remove_action, \"version\": version,\n \"server\": rds_ins_uuid,\n \"security_group\": old_sg_uuid})\n # remove_resp = api.get(payload=payload, timeout=10)\n remove_resp = api.get(payload=payload)\n if remove_resp.get(\"code\") != 0:\n logger.debug(\"the resp of removing the old securty group is:\" +\n str(remove_resp))\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = remove_resp.get(\"msg\")\n continue\n else:\n rds_record.sg = None\n rds_record.save()\n\n # grant the new security group to the instance\n payload.update({\"action\": apply_action, \"version\": version,\n \"server\": rds_ins_uuid, \"security_group\": sg_uuid})\n # grant_resp = api.get(payload=payload, timeout=10)\n grant_resp = api.get(payload=payload)\n\n if grant_resp.get(\"code\") != 0:\n logger.debug(\"the resp of granting the new securty group is:\" +\n str(grant_resp))\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = grant_resp.get(\"msg\")\n logger.error(\n \"security_group with sg_id \" + sg_id +\n \" cannot apply to rds with rds_id \" + rds_id)\n else:\n try:\n rds_record.sg = RdsSecurityGroupModel.\\\n get_security_by_id(sg_id)\n rds_record.save()\n except Exception as exp:\n logger.error(\"cannot save grant sg to rds to db, {}\".\n format(exp.message))\n else:\n sg_results_succ.append(sg_id)\n result_data.update({rds_id: sg_results_succ})\n resp = console_response(code, msg, len(result_data.keys()), [result_data])\n return resp", "def update_cluster_security_group(ec2, cluster_props):\n vpc = ec2.Vpc(id=cluster_props['VpcId'])\n\n # The first Security group should be the default one\n defaultSg = list(vpc.security_groups.all())[0]\n print(\"Default Security group:\", defaultSg)\n\n # Authorize access\n try:\n defaultSg.authorize_ingress(GroupName=defaultSg.group_name,\n CidrIp='0.0.0.0/0',\n IpProtocol='TCP',\n FromPort=int(DWH_PORT),\n ToPort=int(DWH_PORT)\n 
)\n print(\"Access authorized\")\n except botocore.exceptions.ClientError as e:\n print(\"ClientError:\", e)\n except Exception as e:\n print(\"Error:\", e)", "def _set_security_group(client, instance_id_list, security_groups):\n logging.info('Setting the security group of instances.')\n for instance_id in instance_id_list:\n client.modify_instance_attribute(InstanceId=instance_id, Groups=security_groups)", "def grant_grp_self_access ( ec2_conn, grp, start_port, end_port, protocol = 'tcp' ) :\n if not does_grp_rule_exist( grp, grp, start_port, end_port, protocol ) :\n grp.authorize( ip_protocol = protocol,\n from_port = start_port,\n to_port = end_port,\n src_group = grp )\n ec2_conn.authorize_security_group_egress( group_id = grp.id,\n ip_protocol = protocol,\n from_port = start_port,\n to_port = end_port,\n src_group_id = grp.id )", "def _test_overlapping_sec_grp_rules(self):\n initial_security_groups = []\n if self.stateless_sg:\n md_secgrp = self._create_security_group('metadata_secgrp')\n self.create_ingress_metadata_secgroup_rule(\n secgroup_id=md_secgrp['id'])\n initial_security_groups.append(\n {'name': md_secgrp['name']})\n client_ssh, _, vms = self.create_vm_testing_sec_grp(\n num_servers=2, security_groups=initial_security_groups)\n tmp_ssh, _, tmp_vm = self.create_vm_testing_sec_grp(\n num_servers=1, security_groups=initial_security_groups)\n srv_ssh = tmp_ssh[0]\n srv_vm = tmp_vm[0]\n srv_port = self.client.list_ports(network_id=self.network['id'],\n device_id=srv_vm['server']['id'])['ports'][0]\n srv_ip = srv_port['fixed_ips'][0]['ip_address']\n secgrps = []\n for i, vm in enumerate(vms):\n sg = self._create_security_group('secgrp-%d' % i)\n self.create_loginable_secgroup_rule(secgroup_id=sg['id'])\n port = self.client.list_ports(network_id=self.network['id'],\n device_id=vm['server']['id'])['ports'][0]\n self.client.update_port(port['id'], security_groups=[sg['id']])\n secgrps.append(sg)\n tcp_port = 3000\n rule_list = [{'protocol': constants.PROTO_NUM_TCP,\n 'direction': constants.INGRESS_DIRECTION,\n 'port_range_min': tcp_port,\n 'port_range_max': tcp_port,\n 'remote_group_id': secgrps[0]['id']},\n {'protocol': constants.PROTO_NUM_TCP,\n 'direction': constants.INGRESS_DIRECTION,\n 'port_range_min': tcp_port,\n 'port_range_max': tcp_port + 2,\n 'remote_group_id': secgrps[1]['id']}]\n self.client.update_port(srv_port['id'],\n security_groups=[secgrps[0]['id'], secgrps[1]['id']])\n self.create_secgroup_rules(rule_list, secgroup_id=secgrps[0]['id'])\n\n if self.stateless_sg:\n # NOTE(slaweq): in case of stateless SG, client needs to have also\n # rule which will explicitly accept ingress TCP connections which\n # will be replies from the TCP server so it will use random\n # destination port (depends on the src port choosen by client while\n # establishing connection)\n self.create_security_group_rule(\n security_group_id=secgrps[0]['id'],\n protocol=constants.PROTO_NAME_TCP,\n direction=constants.INGRESS_DIRECTION)\n self.create_security_group_rule(\n security_group_id=secgrps[1]['id'],\n protocol=constants.PROTO_NAME_TCP,\n direction=constants.INGRESS_DIRECTION)\n\n # The conntrack entries are ruled by the OF definitions but conntrack\n # status can change the datapath. 
Let's check the rules in two\n # attempts\n for _ in range(2):\n with utils.StatefulConnection(\n client_ssh[0], srv_ssh, srv_ip, tcp_port) as con:\n con.test_connection()\n for port in range(tcp_port, tcp_port + 3):\n with utils.StatefulConnection(\n client_ssh[1], srv_ssh, srv_ip, port) as con:\n con.test_connection()", "def request_access_to_groups(self, ceph):\n for ceph_group in (\"volumes\", \"images\", \"vms\"):\n ceph.request_access_to_group(\n name=ceph_group,\n object_prefix_permissions={\"class-read\": [\"rbd_children\"]},\n permission=\"rwx\",\n )", "def test_aws_service_api_vm_security_group_put(self):\n pass", "def dvs_port_security_group(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n self.show_step(2)\n instances = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n security_groups=[security_group.name])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(3)\n access_point, access_point_ip = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n security_groups=[security_group.name])\n\n ips = [os_conn.get_nova_instance_ip(i, net_name=self.inter_net_name)\n for i in instances]\n ip_pair = dict.fromkeys([access_point_ip])\n for key in ip_pair:\n ip_pair[key] = ips\n openstack.check_connection_vms(ip_pair)\n\n self.show_step(4)\n ips = []\n for instance in instances:\n port = os_conn.neutron.create_port({\n \"port\": {\n \"network_id\": default_net.id,\n \"device_id\": instance.id\n }})['port']\n ips.append(port['fixed_ips'][0]['ip_address'])\n\n self.show_step(5)\n for key in ip_pair:\n ip_pair[key] = ips\n openstack.check_connection_vms(ip_pair, result_of_command=1)", "def check_security_group(self):\n return True", "def grant_grp_access ( ec2_conn, incoming_grps, tgt_grp, port, protocol = 'tcp' ) :\n for grp in incoming_grps :\n if not does_grp_rule_exist( tgt_grp, grp, port, port, protocol ) :\n tgt_grp.authorize( ip_protocol = protocol,\n from_port = port,\n to_port = port,\n src_group = grp )\n ec2_conn.authorize_security_group_egress( group_id = grp.id,\n ip_protocol = protocol,\n from_port = port,\n to_port = port,\n src_group_id = tgt_grp.id )", "def test_aws_service_api_security_groups_get(self):\n pass", "def process_security_group ( ec2_conn, vpc, base_name, params, secgrp_type = None, secgrp_description = None ) :\n if not secgrp_type :\n secgrp_type = params[ 'type' ]\n if not secgrp_description :\n secgrp_description = params[ 'description' ]\n\n secgrp = find_group( ec2_conn, base_name, secgrp_type )\n if not secgrp :\n if params.get( 'create', 'NO' ) == 'YES' :\n secgrp_name = get_secgrp_name( base_name, secgrp_type )\n print \"Creating security group with name: \" + secgrp_name\n secgrp = create_secgrp( ec2_conn, vpc, secgrp_name, secgrp_description )\n else :\n print \"ERROR: Could not find group with name \" + get_secgrp_name( base_name, secgrp_type )\n sys.exit( 1 )\n\n print \"Prepping rules for security group \" + secgrp.name\n remove_all_rules( ec2_conn, [ secgrp ], True, base_name )\n\n # Reload group to retrieve new object with no rules.\n 
secgrp = find_group( ec2_conn, base_name, secgrp_type )\n\n is_public = params.get( 'public' ) == 'YES'\n if is_public :\n nat_secgrp = None\n if params.get( 'os-update' ) == 'YES' :\n ec2_conn.authorize_security_group_egress( group_id = secgrp.id,\n ip_protocol = \"tcp\",\n from_port = 80,\n to_port = 80,\n cidr_ip = all_ip_cidr )\n if params.get( 'public-tcp-ports' ) :\n public_ports = params[ 'public-tcp-ports' ]\n for port in public_ports :\n secgrp.authorize( ip_protocol = \"tcp\",\n from_port = port,\n to_port = port,\n cidr_ip = all_ip_cidr )\n\n if params.get( 'incoming-cidr-rules' ) :\n for incoming_rule in params[ 'incoming-cidr-rules' ] :\n start_port = incoming_rule.get( 'port' )\n end_port = start_port\n\n protocol = get_secgrp_protocol_param( incoming_rule )\n cidr_list = get_cidr_param( incoming_rule[ 'cidr' ] )\n\n secgrp.authorize( ip_protocol = protocol,\n from_port = start_port,\n to_port = end_port,\n cidr_ip = cidr_list )\n\n else :\n nat_secgrp = find_group( ec2_conn, base_name, 'NAT' )\n # Grant NAT access to login to the machine\n if not nat_secgrp :\n print \"ERROR: Could not find NAT security group!\"\n sys.exit( 1 )\n grant_ssh_access( ec2_conn, [ secgrp ], nat_secgrp )\n if params.get( 'os-update' ) == 'YES' :\n grant_cidr_access( ec2_conn, all_ip_cidr, [ secgrp ], 80, nat_secgrp )\n # Need to reload secgrp so it contains latest rules\n secgrp = find_group( ec2_conn, base_name, secgrp_type )\n # Need to reload NAT secgrp so it contains latest rules\n nat_secgrp = find_group( ec2_conn, base_name, 'NAT' )\n\n if params.get( 'outgoing-cidr-rules' ) :\n for outgoing_rule in params[ 'outgoing-cidr-rules' ] :\n start_port = outgoing_rule.get( 'port' )\n end_port = start_port\n protocol = get_secgrp_protocol_param( outgoing_rule )\n cidr_list = get_cidr_param( outgoing_rule[ 'cidr' ] )\n\n for cidr in cidr_list :\n grant_cidr_access( ec2_conn, cidr, [ secgrp ], start_port, nat_secgrp, protocol )\n\n if params.get( 'outgoing-group-rules' ) :\n for outgoing_rule in params[ 'outgoing-group-rules' ] :\n start_port = outgoing_rule.get( 'port' )\n end_port = start_port\n protocol = get_secgrp_protocol_param( outgoing_rule )\n target_secgrp_type = outgoing_rule[ 'group-type' ]\n target_secgrp = find_group( ec2_conn, base_name, target_secgrp_type )\n grant_grp_access( ec2_conn, [ secgrp ], target_secgrp, start_port, protocol )\n \n if params.get( 'incoming-group-rules' ) :\n for incoming_rule in params[ 'incoming-group-rules' ] :\n start_port = incoming_rule.get( 'port' )\n end_port = start_port\n protocol = get_secgrp_protocol_param( incoming_rule )\n incoming_secgrp_type = incoming_rule[ 'group-type' ]\n incoming_secgrp = find_group( ec2_conn, base_name, incoming_secgrp_type )\n grant_grp_access( ec2_conn, [ incoming_secgrp ], secgrp, start_port, protocol )\n\n if params.get( 'self-rules' ) :\n for self_rule in params[ 'self-rules' ] :\n start_port = self_rule.get( 'port' )\n end_port = start_port\n\n if not start_port :\n start_port = self_rule[ 'port-range' ][ 'start' ]\n end_port = self_rule[ 'port-range' ][ 'end' ]\n\n protocol = get_secgrp_protocol_param( self_rule )\n\n grant_grp_self_access( ec2_conn, secgrp, start_port, end_port, protocol )\n\n # Reload the security group so it contains all the latest rules\n secgrp = find_group( ec2_conn, base_name, secgrp_type )\n return ( secgrp_type, secgrp )", "def auth_secgroup(self, args):\n region = args[\"Region\"]\n sgid = args[\"Security-group-ID\"]\n protocol = args[\"protocol\"]\n from_port = int(args[\"FromPort\"])\n 
to_port = int(args[\"ToPort\"])\n ip_range = args[\"IpRange\"]\n message = MessageClass()\n\n # Boto3 client creation by providing the access_id and access_secret\n ec2 = boto3.client(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n\n data = ec2.authorize_security_group_ingress(\n GroupId=sgid,\n IpPermissions=[\n {'IpProtocol': protocol,\n 'FromPort': from_port,\n 'ToPort': to_port,\n 'IpRanges': [{'CidrIp': ip_range}]}\n ])\n attachment = MessageAttachmentsClass()\n attachment.title = data\n message.message_text = \"Ingress Successfully Set :\"\n message.attach(attachment)\n\n return message.to_json()", "def is_secured_cluster(self, services):\n return services and \"cluster-env\" in services[\"configurations\"] and\\\n \"security_enabled\" in services[\"configurations\"][\"cluster-env\"][\"properties\"] and\\\n services[\"configurations\"][\"cluster-env\"][\"properties\"][\"security_enabled\"].lower() == \"true\"", "def test_break_security_group_usual_case_specify_sg():", "def find_secgrp ( ec2_conn, secgrp_name ) :\n sec_grps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ secgrp_name ] } )\n if len( sec_grps ) > 0 :\n return sec_grps[ 0 ]\n \n return None", "def install_mongo ( vpc_conn, ec2_conn, cloudwatch_conn, vpc, base_name, aws_account_type, base_topicarn, params ):\n app_type = 'MONGO'\n mongo_keypair = get_keypair_name( aws_account_type, vpc.region.name, app_type )\n print mongo_keypair\n mongo_sec_grp_name = get_secgrp_name( base_name, app_type )\n \n print \"mongo_sec_grp_name\" + mongo_sec_grp_name\n \n mongo_sec_grp = find_secgrp(ec2_conn, mongo_sec_grp_name)\n print mongo_sec_grp\n \n if not mongo_sec_grp :\n mongo_sec_grp = create_secgrp( ec2_conn,\n vpc,\n mongo_sec_grp_name,\n 'Controls access to the ' + app_type )\n print mongo_sec_grp \n \n remove_all_rules( ec2_conn, [ mongo_sec_grp ],deep = True,base_name = base_name)\n grant_ssh_access( ec2_conn, [ mongo_sec_grp ], find_group( ec2_conn, base_name, 'NAT' ) )\n grant_grp_self_access ( ec2_conn, mongo_sec_grp, 0, 65535, protocol = 'tcp' )\n \n mongo_ami_name = base_name + '-' + app_type\n \n mongo_ami = get_ami_by_name( ec2_conn, mongo_ami_name )\n if not mongo_ami :\n mongo_ami = get_ami_by_name( ec2_conn, default_mongo_ami )\n\n print \"Creating mongoDB Instances\"\n \n mongo_subnets = get_vpc_subnets( vpc_conn, vpc, 'STATIC' )\n mongo_sec_grps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ mongo_sec_grp_name ] } )\n \n primary_ip = get_primary_node(mongo_subnets)\n mongo_instances = []\n mongo_config = get_mongo_config(mongo_subnets)\n \n user_data = None\n primary_instance_id = None\n primary_subnet = None\n for subnet in mongo_subnets :\n cidr_block = subnet.cidr_block\n \n if cidr_block == primary_ip :\n primary_subnet = subnet\n \n # First launch the secondary instances\n if cidr_block != primary_ip :\n instance_private_ip = get_static_ip(subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo )\n\n 
print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )\n\n user_data = base64.b64encode(mongo_config)\n \n print \"Launching primary mongo instance\"\n print \"primary_subnet \" +primary_subnet.cidr_block\n instance_private_ip = get_static_ip(primary_subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = primary_subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo)\n \n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )", "def handle_region(self, region, args):\n result = [CHECKMARK, str(region), \"created security group '{}'\".format(GROUP_NAME)]\n\n try:\n # Create the security group\n response = region.conn.create_security_group(\n Description='Security group for Alia replicas and clients.',\n GroupName=GROUP_NAME,\n )\n\n # Get the newly created group id\n group_id = response[\"GroupId\"]\n\n # Allow all network traffic from within the security group\n response = region.conn.authorize_security_group_ingress(\n GroupId = group_id,\n IpPermissions = [\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 0, \"ToPort\": 65535,\n \"UserIdGroupPairs\": [\n {\n \"GroupId\": group_id,\n \"Description\": \"allow all traffic from the same group\",\n }\n ]\n }\n ]\n )\n\n # Open Alia-specific ports for access\n reponse = region.conn.authorize_security_group_ingress(\n GroupId = group_id,\n IpPermissions = [\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 22, \"ToPort\": 22,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"allow remote SSH access\"\n }\n ]\n },\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 3264, \"ToPort\": 3285,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"external Alia service access\",\n }\n ],\n \"Ipv6Ranges\": [\n {\n \"CidrIpv6\": \"::/0\",\n \"Description\": \"external Alia service IPv6 access\"\n }\n ]\n },\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 5356, \"ToPort\": 5356,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"research services access\"\n }\n ]\n },\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 4157, \"ToPort\": 4157,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"master services access\",\n }\n ]\n },\n ]\n )\n\n\n except Exception as e:\n result[0] = CROSSMARK\n result[2] = str(e)\n\n\n return result", "def do_add_security_group(cs, args):\n opts = {}\n opts['id'] = args.container\n opts['security_group'] = args.security_group\n opts = zun_utils.remove_null_parms(**opts)\n try:\n cs.containers.add_security_group(**opts)\n print(\"Request to add security group for container %s \"\n \"has been accepted.\" % args.container)\n except Exception as e:\n print(\"Add security group for container %(container)s \"\n \"failed: %(e)s\" % {'container': args.container, 'e': e})", "def create_sec_group(ec2, sec_group_name):\n sec = ec2.create_security_group(sec_group_name, 'Jvivian Boto SecGroup')\n port = 22\n sec.authorize('tcp', port, port, '0.0.0.0/0')", "def list_secgroups(self, name=None):", "def 
change_instance_security_groups(instance_id, security_group_ids):\n\n # Retrieve the IDs of the network interfaces attached to the EC2 instance\n ec2_client = boto3.client('ec2')\n try:\n response = ec2_client.describe_instances(InstanceIds=[instance_id])\n except ClientError as e:\n logging.error(e)\n return False\n instance_info = response['Reservations'][0]['Instances'][0]\n\n # Assign the security groups to each network interface\n for network_interface in instance_info['NetworkInterfaces']:\n try:\n ec2_client.modify_network_interface_attribute(\n NetworkInterfaceId=network_interface['NetworkInterfaceId'],\n Groups=security_group_ids)\n except ClientError as e:\n logging.error(e)\n return False\n return True", "def rule_40_can_create_sg(session):\n\n def try_create(session, side):\n res, conn_vpc = session[\"config\"][side][\"res\"], session[\"conn\"][side](\"vpc\")\n subnet = conn_vpc.get_all_subnets([res[\"subnet_id\"]])[0]\n\n try:\n conn_vpc.create_security_group(\n \"foo\", \"bar\", vpc_id = subnet.vpc_id, dry_run = True)\n except EC2ResponseError as e:\n if 412 != e.status:\n raise e\n\n try_create(session, \"server\")\n try_create(session, \"client\")\n\n return True", "def test_modify_storage_group_srdf_set_consistency_enable(self):\n if not self.run_consistency_enable_check():\n self.skipTest(\n 'Skip test_modify_storage_group_srdf_set_consistency_enable '\n 'This fix is in V9.2.1.7')\n sg_name, srdf_group_number, local_volume, remote_volume = (\n self.create_rdf_sg())\n self.replication.modify_storage_group_srdf(\n storage_group_id=sg_name, action='setmode',\n srdf_group_number=srdf_group_number,\n options={'setMode': {'mode': 'Asynchronous'}})\n status = self.replication.modify_storage_group_srdf(\n storage_group_id=sg_name, srdf_group_number=srdf_group_number,\n action=\"EnableConsistency\")\n self.assertEqual('Enabled', status.get('consistency_protection'))\n disable_status = self.replication.modify_storage_group_srdf(\n storage_group_id=sg_name, srdf_group_number=srdf_group_number,\n action=\"DisableConsistency\")\n self.assertEqual(\n 'Disabled', disable_status.get('consistency_protection'))", "def cluster(self):\n assert False", "def enableRemoteAccess():\n myClusterProps = redshift.describe_clusters(\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER\n )['Clusters'][0]\n\n # attempt to allow incoming TCP connections to the cluster\n print(\"Enabling incoming TCP connections...\")\n try:\n vpc = ec2.Vpc(id=myClusterProps[\"VpcId\"])\n for sg in vpc.security_groups.all():\n if sg.group_name == \"default\":\n defaultSg = sg\n break\n\n defaultSg.authorize_ingress(\n GroupName=defaultSg.group_name,\n CidrIp=CIDR_IP_ALLOWED,\n IpProtocol='TCP',\n FromPort=int(DWH_PORT),\n ToPort=int(DWH_PORT)\n )\n except Exception as e:\n print(e)\n return\n\n print(\"\\nCluster is ready for access!\")\n endpoint = myClusterProps[\"Endpoint\"][\"Address\"]\n print(f\"Endpoint: {endpoint}\")", "def test_patch_cluster_role(self):\n pass", "def get_lb_secgrp_name ( base_name, app_name ) :\n return get_secgrp_name( base_name, get_lb_secgrp_type( app_name ) )", "def test_secgroup_propagation_local_override(self):\n\n # Needs love - not idempotent\n unit = self.compute_sentry\n if self._get_openstack_release() >= self.trusty_mitaka:\n conf = \"/etc/neutron/plugins/ml2/openvswitch_agent.ini\"\n else:\n conf = \"/etc/neutron/plugins/ml2/ml2_conf.ini\"\n self.d.configure('neutron-api', {'neutron-security-groups': 'True'})\n self.d.configure('neutron-openvswitch',\n {'disable-security-groups': 'True'})\n 
self._wait_and_check()\n ret = u.validate_config_data(unit, conf, 'securitygroup',\n {'enable_security_group': 'False'})\n msg = \"Propagation error, expected %s=%s\" % ('enable_security_group',\n 'False')\n self.process_ret(ret=ret, message=msg)\n self.d.configure('neutron-openvswitch',\n {'disable-security-groups': 'False'})\n self.d.configure('neutron-api', {'neutron-security-groups': 'True'})\n self._wait_and_check()\n ret = u.validate_config_data(unit, conf, 'securitygroup',\n {'enable_security_group': 'True'})", "def test_replace_cluster_role(self):\n pass", "def test_positive_cgroups(self):\n # Test parsing \"cpuset.cpus\" file\n self.assertEqual(self.computer._manager_list[0]._cpu_id_list(), self.cpu_list)\n # This should created per-cpu groups and move all tasks in CPU pool into cpu0\n self.computer.format(alter_network=False, alter_user=False)\n # Test files creation for exclusive CPUs\n for cpu_id in self.cpu_list:\n cpu_n_path = os.path.join(self.cpuset_path, \"cpu\" + str(cpu_id))\n self.assertEqual(str(cpu_id), file_content(os.path.join(cpu_n_path, \"cpuset.cpus\")))\n self.assertEqual(\"1\", file_content(os.path.join(cpu_n_path, \"cpuset.cpu_exclusive\")))\n if cpu_id > 0:\n self.assertEqual(\"\", file_content(os.path.join(cpu_n_path, \"tasks\")))\n\n # Test moving tasks from generic core to private core\n # request PID 1001 to be moved to its private CPU\n request_file_path = os.path.join(self.computer.partition_list[0].path,\n slapos.manager.cpuset.Manager.cpu_exclusive_file)\n file_write(\"1001\\n\", request_file_path)\n # Simulate slapos instance call to perform the actual movement\n self.computer._manager_list[0].instance(\n SlapGridPartitionMock(self.computer.partition_list[0]))\n # Simulate cgroup behaviour - empty tasks in the pool\n file_write(\"\", os.path.join(self.cpuset_path, \"tasks\"))\n # Test that format moved all PIDs from CPU pool into CPU0\n tasks_at_cpu0 = file_content(os.path.join(self.cpuset_path, \"cpu0\", \"tasks\")).split()\n self.assertIn(\"1000\", tasks_at_cpu0)\n # test if the moving suceeded into any provate CPUS (id>0)\n self.assertTrue(any(\"1001\" in file_content(exclusive_task)\n for exclusive_task in glob.glob(os.path.join(self.cpuset_path, \"cpu[1-9]\", \"tasks\"))))\n self.assertIn(\"1002\", tasks_at_cpu0)\n # slapformat should remove successfully moved PIDs from the .slapos-cpu-exclusive file\n self.assertEqual(\"\", file_content(request_file_path).strip())", "def capacitygroup_group():", "def do_remove_security_group(cs, args):\n opts = {}\n opts['id'] = args.container\n opts['security_group'] = args.security_group\n opts = zun_utils.remove_null_parms(**opts)\n try:\n cs.containers.remove_security_group(**opts)\n print(\"Request to remove security group for container %s \"\n \"has been accepted.\" % args.container)\n except Exception as e:\n print(\"Remove security group for container %(container)s \"\n \"failed: %(e)s\" % {'container': args.container, 'e': e})", "def auth_secgroupeg(self, args):\n region = args[\"Region\"]\n sgid = args[\"Security-group-ID\"]\n protocol = args[\"protocol\"]\n from_port = int(args[\"FromPort\"])\n to_port = int(args[\"ToPort\"])\n ip_range = args[\"IpRange\"]\n message = MessageClass()\n\n # Boto3 client creation by providing the access_id and access_secret\n ec2 = boto3.client(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n 
config=None)\n\n data = ec2.authorize_security_group_egress(\n GroupId=sgid,\n IpPermissions=[\n {'IpProtocol': protocol,\n 'FromPort': from_port,\n 'ToPort': to_port,\n 'IpRanges': [{'CidrIp': ip_range}]}\n ])\n attachment = MessageAttachmentsClass()\n attachment.title = data\n message.message_text = \"Egress Successfully Set :\"\n message.attach(attachment)\n\n return message.to_json()", "def create_groups(self, role):\n security_group_names = self._get_all_group_names()\n\n cluster_group_name = self.get_cluster_group_name()\n if not cluster_group_name in security_group_names:\n self.ec2Connection.create_security_group(cluster_group_name, \"Hadoop cluster (%s)\" % (self.name))\n self.ec2Connection.authorize_security_group(cluster_group_name, cluster_group_name)\n # Allow SSH from anywhere\n self.ec2Connection.authorize_security_group(cluster_group_name, ip_protocol=\"tcp\", from_port=22, to_port=22, cidr_ip=\"0.0.0.0/0\")\n\n role_group_name = self.group_name_for_role(role)\n if not role_group_name in security_group_names:\n self.ec2Connection.create_security_group(role_group_name, \"Hadoop %s (%s)\" % (role, self.name))", "def _set_app_security_group(self, security_group):\n pass", "def createSG(ec2,name,rules):\n\t# check if the security group exists\n\tgroup = None\n\tsgGroups = [sg for sg in ec2.get_all_security_groups() if sg.name == name]\n\tif sgGroups:\n\t\tgroup = sgGroups[0]\n\t\tec2.delete_security_group(name=name, group_id=group)\t\n\tprint \"Creating %s Security Group\" % name\n\tgroup = ec2.create_security_group(name, 'group for %s' % name)\n\tif group:\n\t\t# Set the inbound rules\n\t\tfor rule in rules:\n\t\t\tif rule.src_group_name:\n\t\t\t\tgroup.authorize(ip_protocol=rule.ip_protocol,from_port=rule.from_port,to_port=rule.to_port,cidr_ip=rule.cidr_ip,src_group=group)\n\t\t\telse:\n\t\t\t\tgroup.authorize(ip_protocol=rule.ip_protocol,from_port=rule.from_port,to_port=rule.to_port,cidr_ip=rule.cidr_ip,src_group=None)\n\t\treturn True\n\telse:\n\t\tlogError('Error during '+name+' Security Group update')\n\t\treturn False", "def test_replace_cluster_resource_quota(self):\n pass", "def authorize_cluster_access(IpAddress='0.0.0.0/0'):\n\n ec2_client = boto3.client('ec2')\n\n # Redshift uses port 5439 by default. 
If Redshift was configured to use\n # a different port, specify the FromPort= and ToPort= arguments accordingly.\n try:\n ec2_client.authorize_security_group_ingress(GroupName='default',\n IpProtocol='tcp',\n FromPort=5439,\n ToPort=5439,\n CidrIp=IpAddress)\n except ClientError as e:\n print(f'ERROR: {e}')\n return False\n return True", "def remove_rds_security_group(payload):\n rds_ids = payload.pop(\"resource_id\")\n sg_ids = payload.pop(\"sg_id\")\n action = payload.get(\"action\")\n version = payload.get(\"version\")\n result_data = {}\n\n code = 0\n msg = 'Success'\n\n succ, resp = get_ha_rds_backend_instance_info(payload)\n if not succ:\n return resp\n rds_2_instance = {rds: instance for instance, rds in resp.items()}\n\n for rds_id in rds_ids:\n sg_results_succ = []\n for sg_id in sg_ids:\n sg = RdsSecurityGroupModel.get_security_by_id(sg_id=sg_id)\n sg_uuid = sg.uuid\n visible_rds_record = RdsModel.get_rds_by_id(rds_id=rds_id)\n if visible_rds_record.rds_type == 'ha':\n rds_group = visible_rds_record.rds_group\n rds_records = RdsModel.get_rds_records_by_group(rds_group)\n else:\n rds_records = []\n for rds_record in rds_records:\n rds_ins_uuid = rds_2_instance.get(rds_record.uuid)\n payload.update({\"server\": rds_ins_uuid})\n payload.update({\"security_group\": sg_uuid})\n payload.update({\"version\": version})\n payload.update({\"action\": action})\n # resp = api.get(payload=payload, timeout=10)\n resp = api.get(payload=payload)\n\n if resp.get(\"code\") != 0:\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = resp.get(\"msg\")\n # sg_results.update({sg_id: \"failed\"})\n logger.error(\n \"security_group with sg_id \" + sg_id +\n \" cannot remove from rds with rds_id %s\" + rds_id)\n else:\n try:\n rds_record.sg = None\n rds_record.save()\n except Exception as exp:\n logger.error(\"save removal of security group to db fail,\"\n \"{}\".format(exp.message))\n else:\n sg_results_succ.append(sg_id)\n result_data.update({rds_id: sg_results_succ})\n resp = console_response(code, msg, len(result_data.keys()), [result_data])\n return resp", "def tag_instance_security_group(self, tags):\n self._request({\"instance-security-group-tags\": dict(tags)})", "def security_group(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"security_group\")", "def dvs_instances_one_group(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(2)\n self.show_step(3)\n openstack.create_instances(os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n vm_count=vm_count,\n security_groups=[security_group.name])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(4)\n srv_list = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, srv_list)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair)\n\n 
self.show_step(5)\n for srv in srv_list:\n os_conn.nova.servers.delete(srv)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(srv)", "def app_security_group_management(self) -> pulumi.Output[Optional['DomainAppSecurityGroupManagement']]:\n return pulumi.get(self, \"app_security_group_management\")", "def sg_rule_sets_by_rds(rds, ec2, account, region, output_bucket):\n \"\"\"generate list of rds instances\"\"\"\n rds_list = rds.describe_db_instances().get('DBInstances')\n\n \"\"\"generate list of security groups to get rule set details\"\"\"\n sg_list = ec2.describe_security_groups().get('SecurityGroups')\n\n for sg_obj in sg_list:\n \"\"\"find out how many rdss are using a security group\"\"\"\n for rds_obj in rds_list:\n for rdssg in rds_obj.get('VpcSecurityGroups'):\n \"\"\"check if security group is associated to rds instance\"\"\"\n if sg_obj.get('GroupId') == rdssg.get('VpcSecurityGroupId'):\n \n \"\"\"move on to rule entries\"\"\"\n for rule in sg_obj.get('IpPermissions'):\n \"\"\"cidr as source\"\"\"\n for cidr in rule.get('IpRanges'):\n if cidr.get('CidrIp'):\n output_bucket.append(misc.format_line((\n misc.check_if(account.get('name')),\n misc.check_if(region.get('RegionName')),\n misc.check_if(rds_obj.get('DBSubnetGroup').get('VpcId')),\n misc.check_if(rds_obj.get('DBInstanceIdentifier')),\n misc.check_if(str(rds_obj.get('PubliclyAccessible'))),\n misc.check_if(rds_obj.get('Endpoint').get('Address')),\n misc.check_if(str(rds_obj.get('Endpoint').get('Port'))),\n misc.check_if(sg_obj.get('GroupId')),\n misc.check_if(sg_obj.get('GroupName')),\n misc.check_if(str(cidr.get('CidrIp'))),\n misc.check_if(str(check_port(rule.get('FromPort')))),\n misc.check_if(str(check_port(rule.get('ToPort')))),\n misc.check_if(str(check_proto(rule.get('IpProtocol'))))\n )))\n\n \"\"\"security groups as source\"\"\"\n for group in rule.get('UserIdGroupPairs'):\n if group.get('GroupId'):\n output_bucket.append(misc.format_line((\n misc.check_if(account.get('name')),\n misc.check_if(region.get('RegionName')),\n misc.check_if(rds_obj.get('DBSubnetGroup').get('VpcId')),\n misc.check_if(rds_obj.get('DBInstanceIdentifier')),\n misc.check_if(str(rds_obj.get('PubliclyAccessible'))),\n misc.check_if(rds_obj.get('Endpoint').get('Address')),\n misc.check_if(str(rds_obj.get('Endpoint').get('Port'))),\n misc.check_if(sg_obj.get('GroupId')),\n misc.check_if(sg_obj.get('GroupName')),\n misc.check_if(group.get('GroupId')),\n misc.check_if(str(check_port(rule.get('FromPort')))),\n misc.check_if(str(check_port(rule.get('ToPort')))),\n misc.check_if(str(check_proto(rule.get('IpProtocol'))))\n )))", "def security_group_update(secgroup=None, auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(keep_name=True, **kwargs)\n return cloud.update_security_group(secgroup, **kwargs)", "def find_group ( ec2_conn, base_name, group_type ) :\n secgrp = None\n secgrps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ get_secgrp_name( base_name, group_type ) ] } )\n for s in secgrps :\n secgrp = s\n break\n\n return secgrp", "def get_lb_secgrp_type ( app_name ) :\n return app_name.upper( ) + '-LB'", "def ensure_security_groups_created(vpc, environment):\n conglomerate_name = environment + '-conglomerate'\n load_balancer_name = environment + '-load-balancer'\n\n existing = vpc.security_groups.filter(Filters=[\n { 'Name': 'group-name', 'Values': [ conglomerate_name, load_balancer_name ] }\n ])\n ret = {}\n for security_group in existing:\n if 
security_group.group_name == conglomerate_name:\n ret['conglomerate'] = security_group\n elif security_group.group_name == load_balancer_name:\n ret['load-balancer'] = security_group\n else:\n raise Exception(\"Unexpected security group name: \" + security_group.group_name)\n\n if not ret['conglomerate']:\n # untested\n ret['conglomerate'] = vpc.create_security_group(\n GroupName=conglomerate_name,\n Description=conglomerate_name\n )\n if not ret['load-balancer']:\n # untested\n ret['load-balancer'] = vpc.create_security_group(\n GroupName=load_balancer_name,\n Description=load_balancer_name\n )\n\n try:\n ret['conglomerate'].authorize_ingress(IpPermissions=[\n { 'IpProtocol': 'icmp', 'FromPort': 0, 'ToPort': 255, 'IpRanges': [ { 'CidrIp': '0.0.0.0/0' } ] },\n { 'IpProtocol': 'tcp', 'FromPort': 9000, 'ToPort': 9000, 'UserIdGroupPairs': [ { 'GroupId': ret['load-balancer'].id } ] },\n ])\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] != 'InvalidPermission.Duplicate':\n raise e\n\n try:\n ret['load-balancer'].authorize_ingress(IpPermissions=[\n { 'IpProtocol': 'tcp', 'FromPort': 80, 'ToPort': 80 },\n { 'IpProtocol': 'tcp', 'FromPort': 443, 'ToPort': 443 },\n { 'IpProtocol': 'icmp', 'FromPort': 0, 'ToPort': 255, 'IpRanges': [ { 'CidrIp': '0.0.0.0/0' } ] },\n { 'IpProtocol': 'tcp', 'FromPort': 1024, 'ToPort': 65535, 'IpRanges': [ { 'CidrIp': Constants['VpcCidr'] } ] },\n ])\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] != 'InvalidPermission.Duplicate':\n raise e\n\n return ret", "def reserve_group(self, name, sco):\n\t\tif self.client is None:\n\t\t\traise UsageError(\"Not connected!\")\n\t\treturn self.client.reserve_group(name, sco)", "def security_groups(self):\n return int(self.get('security_group_rules'))", "def app_security_group_management(self) -> Optional[pulumi.Input['DomainAppSecurityGroupManagement']]:\n return pulumi.get(self, \"app_security_group_management\")", "def update_instance_security_group(self, instance_id,\n new_security_group_ids):\n ports = port_list(self.request, device_id=instance_id)\n for p in ports:\n params = {'security_groups': new_security_group_ids}\n port_update(self.request, p.id, **params)", "def prevent_forking_outside_group(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"prevent_forking_outside_group\")", "def _Delete(self):\n cmd = self.cmd_prefix + [\n 'redshift', 'delete-cluster-subnet-group',\n '--cluster-subnet-group-name', self.name\n ]\n vm_util.IssueCommand(cmd, raise_on_failure=False)", "def get_sg():\n client = boto3.client('ec2')\n all_instances = client.describe_instances()\n all_sg = client.describe_security_groups()\n\n instance_sg_set = set()\n sg_set = set()\n\n for reservation in all_instances[\"Reservations\"]:\n for instance in reservation[\"Instances\"]:\n for sg in instance[\"SecurityGroups\"]:\n instance_sg_set.add(sg[\"GroupName\"])\n\n for security_group in all_sg[\"SecurityGroups\"]:\n sg_set.add(security_group[\"GroupName\"])\n\n idle_sg = sg_set - instance_sg_set\n\n return idle_sg", "def dvs_remote_sg_simple(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n # Create security group with rules for ssh and ping\n security_group = 
os_conn.create_sec_group_for_ssh()\n\n self.show_step(2)\n net_1 = os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network are created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n self.show_step(3)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n self.show_step(4)\n _sg_rules = os_conn.neutron.list_security_group_rules()\n sg_rules = [sg_rule for sg_rule in _sg_rules['security_group_rules']\n if sg_rule['security_group_id'] in [sg1.id, sg2.id]]\n for rule in sg_rules:\n os_conn.neutron.delete_security_group_rule(rule['id'])\n\n self.show_step(5)\n self.show_step(6)\n for sg in [sg1, sg2]:\n for rule in [self.icmp, self.tcp]:\n rule[\"security_group_rule\"][\"security_group_id\"] = sg.id\n rule[\"security_group_rule\"][\"remote_group_id\"] = sg.id\n rule[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(rule)\n rule[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(rule)\n\n # Create access_point to instances from SG1 and SG2\n _, access_point_ip = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n security_groups=[security_group.name, sg1.name, sg2.name])\n\n self.show_step(7)\n istances_sg1 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[sg1.name])\n\n self.show_step(8)\n istances_sg2 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[sg2.name])\n openstack.verify_instance_state(os_conn)\n\n # Get private ips of instances\n ips = {\n 'SG1': [os_conn.get_nova_instance_ip(i, net_name=net_1['name'])\n for i in istances_sg1],\n 'SG2': [os_conn.get_nova_instance_ip(i, net_name=net_1['name'])\n for i in istances_sg2]\n }\n\n self.show_step(9)\n self.show_step(10)\n for group in ips:\n ip_pair = dict.fromkeys(ips[group])\n for key in ip_pair:\n ip_pair[key] = [value for value in ips[group] if key != value]\n openstack.check_connection_through_host(\n access_point_ip, ip_pair, timeout=60 * 5)\n\n self.show_step(11)\n ip_pair = dict.fromkeys(ips['SG1'])\n for key in ip_pair:\n ip_pair[key] = ips['SG2']\n openstack.check_connection_through_host(\n access_point_ip, ip_pair, result_of_command=1, timeout=60 * 5)", "def security_group_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"security_group_id\")", "def security_group_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"security_group_id\")", "def AddSecurityGroupEntry(self, security_group, host=None, port=None):\n if self._conn:\n security_groups = self._conn.get_all_security_groups(groupnames=security_group)\n for sg in security_groups:\n if sg.name == security_group:\n return self._conn.authorize_security_group(sg.name, ip_protocol='tcp', from_port=port, to_port=port, cidr_ip='%s/32' % host)", "def _generate_ec2_instance_and_sg(resource):\n for instance in resource.instances.all():\n for 
security_group in instance.security_groups:\n yield instance, security_group", "def privilegedStartService(self):\n super(GroupOwnedUNIXServer, self).privilegedStartService()\n\n # Unfortunately, there's no public way to access this. -glyph\n fileName = self._port.port\n chown(fileName, getuid(), self.gid)", "def test_replace_cluster_policy(self):\n pass", "def _get_sg_name(sg_name, session):\n return session.resource(\"ec2\").SecurityGroup(sg_name).group_name", "def save_security_group(resp, payload):\n if resp.get(\"code\") != 0:\n return None, SaveDataError(\"Create security group failed\")\n uuid = resp[\"data\"][\"ret_set\"][0][\"id\"]\n name = payload.get(\"description\")\n sg_id = payload.get(\"sg_id\")\n zone_name = payload.get(\"zone\")\n user_name = payload.get(\"owner\")\n zone = ZoneModel.get_zone_by_name(zone_name)\n user = User.objects.get(username=user_name)\n _security_group_ins, err = RdsSecurityGroupModel.objects.create(uuid,\n sg_id,\n name,\n zone,\n user)\n return _security_group_ins, err", "def test_break_security_group_usual_case():", "def create_sg(vpc_id, description, group_name):\n client = boto3.client('ec2')\n security_group = str(group_name + \"_sg\")\n\n # get the security groups\n idle_sg = get_sg()\n\n print(idle_sg)\n print(security_group)\n\n # if security group doesnt exist, create it\n if security_group not in idle_sg:\n print(\"Creating SG\")\n return client.create_security_group(\n Description=description,\n GroupName=security_group,\n VpcId=vpc_id\n )\n return get_sg_id(security_group)", "def create(self, body: CloudSecurityGroup) -> Dict:\n\t\treturn self._post(route=AWSSecurityGroupConsts.CLOUD_SECURITY_GROUP.value, body=body)", "def test_break_security_group_failed():", "def sp_cpu_exclusive_on(cpuset_info):\n if cpuset_info['storpool.slice']['cpuset.cpu_exclusive'] != 1:\n return ['cpuset:storpool.slice cpu_exclusive is not 1'], []\n return [], []", "def authorize(self, cidr_ip=None, ec2_group=None):\r\n if isinstance(ec2_group, SecurityGroup):\r\n group_name = ec2_group.name\r\n group_owner_id = ec2_group.owner_id\r\n else:\r\n group_name = None\r\n group_owner_id = None\r\n return self.connection.authorize_dbsecurity_group(self.name,\r\n cidr_ip,\r\n group_name,\r\n group_owner_id)", "def _test_multiple_ports_secgroup_inheritance(self):\n # create a security group and make it loginable and pingable\n secgrp = self._create_security_group('secgrp')\n self.create_loginable_secgroup_rule(\n secgroup_id=secgrp['id'])\n self.create_pingable_secgroup_rule(\n secgroup_id=secgrp['id'])\n if self.stateless_sg:\n self.create_ingress_metadata_secgroup_rule(\n secgroup_id=secgrp['id'])\n # create two ports with fixed IPs and the security group created\n ports = []\n for i in range(2):\n ports.append(self.create_port(\n self.network, fixed_ips=[{'subnet_id': self.subnets[0]['id']}],\n security_groups=[secgrp['id']]))\n # spawn instances with the ports created\n server_ssh_clients, fips, servers = self.create_vm_testing_sec_grp(\n ports=ports)\n # verify ICMP reachability and ssh connectivity\n for fip in fips:\n self.ping_ip_address(fip['floating_ip_address'])\n self.check_connectivity(fip['floating_ip_address'],\n CONF.validation.image_ssh_user,\n self.keypair['private_key'])", "def prod_load_balancer_sg_valid(self) -> None:\n if self.prod_env:\n sg_name = 'saints-xctf-prod-server-elb-security-group'\n else:\n sg_name = 'saints-xctf-dev-server-elb-security-group'\n\n response = self.ec2.describe_security_groups(Filters=[\n {\n 'Name': 'group-name',\n 
'Values': [sg_name]\n }\n ])\n\n security_group = response.get('SecurityGroups')[0]\n\n self.assertTrue(all([\n security_group.get('GroupName') == sg_name,\n self.validate_load_balancer_sg_rules(\n security_group.get('IpPermissions'),\n security_group.get('IpPermissionsEgress')\n )\n ]))", "def create_security_group(group_name):\n ec2 = boto.ec2.connect_to_region(AWS_REGION)\n for g in ec2.get_all_security_groups():\n if g.name == group_name:\n return # We already have this group setup\n group = ec2.create_security_group(group_name,\n \"%s SSH access group\" % group_name)\n group.authorize(\"tcp\", 22, 22, \"0.0.0.0/0\") # SSH is on port 22, all IPs\n group.authorize(\"tcp\", 80, 80, \"0.0.0.0/0\")\n group.authorize(\"tcp\", 61000, 65000, \"0.0.0.0/0\")\n print \"Created new security group\"", "def update_cluster_merge_across_nodes(request):\n ksm_merge_across_nodes = getattr(\n request.node.cls, \"ksm_merge_across_nodes\"\n )\n\n def fin():\n \"\"\"\n 1) Disable KSM\n \"\"\"\n ll_clusters.updateCluster(\n positive=True, cluster=sla_conf.CLUSTER_NAME[0], ksm_enabled=False\n )\n request.addfinalizer(fin)\n\n assert ll_clusters.updateCluster(\n positive=True,\n cluster=sla_conf.CLUSTER_NAME[0],\n ksm_enabled=True,\n ksm_merge_across_nodes=ksm_merge_across_nodes\n )", "def test_placement_group(ray_start_2_cpus):\n num_workers = 2\n bundle = {\"CPU\": 1}\n bundles = [bundle.copy() for _ in range(num_workers)]\n placement_group = ray.util.placement_group(bundles)\n wg = WorkerGroup(num_workers=num_workers, placement_group=placement_group)\n wg.remove_workers([0])\n wg.add_workers(1)", "def test_aws_service_api_vm_security_group_delete(self):\n pass", "def security_groups(self, oid):\n try:\n path = u'/servers/%s/os-security-groups' % oid\n res = self.client.call(path, u'GET', data=u'', \n token=self.manager.identity.token)\n self.logger.debug(u'Get openstack server security groups: %s' % truncate(res))\n return res[0][u'security_groups']\n except Exception as error:\n self.logger.error(error, exc_info=True)\n data = []\n return res", "def grant_ssh_access ( ec2_conn, tgt_grps, nat_grp ) :\n for grp in tgt_grps :\n grant_grp_access( ec2_conn, [ nat_grp ], grp, 22 )", "def prevent_forking_outside_group(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"prevent_forking_outside_group\")", "def prevent_forking_outside_group(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"prevent_forking_outside_group\")", "def security_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"security_group_id\")", "def security_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"security_group_id\")", "def test_replace_cluster_role_binding(self):\n pass", "def _remove_server_from_sgroup(**kwargs):\n # A10 Lightning APIs\n ServerGrpApi = \"applications/{0}/hosts/{1}/services/{2}/servergroups/{3}\".format(\n kwargs['applicationId'], kwargs['hostId'],\n kwargs['serviceId'], kwargs['serverGroupId'])\n \n ServerGrpPolicyApi = \"applications/{0}/hosts/{1}/services/{2}/servergroups/{3}/policies\".format(\n kwargs['applicationId'], kwargs['hostId'],\n kwargs['serviceId'], kwargs['serverGroupId'])\n \n ServerGrpImportApi = \"applications/{0}/hosts/{1}/services/{2}/servergroups/_import\".format(\n kwargs['applicationId'], kwargs['hostId'], kwargs['serviceId'])\n\n # Build the requests\n request1 = urllib2.Request(\n os.environ[\"API_BASE_URL\"] + ServerGrpApi)\n request2 = urllib2.Request(\n os.environ[\"API_BASE_URL\"] + ServerGrpPolicyApi)\n request3 = 
urllib2.Request(\n os.environ[\"API_BASE_URL\"] + ServerGrpImportApi)\n\n # Auth header\n cred = A10User + ':' + A10UserPassword\n bas64 = b64encode(bytes(cred))\n auth = \"Basic \" + bas64.decode(\"ascii\")\n \n # Complete header dict\n headers = {\n \"provider\": \"root\",\n \"tenant\": A10Tenant,\n \"Content-Type\": \"application/json\",\n \"Authorization\": auth\n }\n\n # Attach all the headers to the requests\n for key, value in headers.items():\n request1.add_header(key, value)\n request2.add_header(key, value)\n request3.add_header(key, value)\n\n # First retrieve the server group data\n response = urllib2.urlopen(request1)\n server_grp_data = json.loads(response.read().decode(\"utf-8\"))\n servers = server_grp_data['servers']\n \n # Remove the required server\n for serv in servers:\n if serv['ipAddress'] == _get_public_ip_addr(ServerInstanceID):\n servers.remove(serv)\n\n # Get server group policies\n response = urllib2.urlopen(request2)\n srv_policies = json.loads(response.read().decode(\"utf-8\"))\n \n # Add parsed server data and server group policies and post it\n server_grp_data['servers'] = servers\n server_grp_data['policies'] = srv_policies\n urllib2.urlopen(request3, json.dumps(server_grp_data).encode(\"utf-8\"))", "def sgup(sg=\"sg_external_ssh\"):\n ip = os.popen(\"/usr/bin/curl ifconfig.co 2>/dev/null\").readline().strip()\n print(\"My Public IP is : \"+ip)\n client = boto3.client(\"ec2\")\n ippermissions = client.describe_security_groups(GroupNames = [ sg ])[\"SecurityGroups\"][0][\"IpPermissions\"]\n print(\"Revoking old IP from group \"+sg)\n client.revoke_security_group_ingress(GroupName = sg, IpPermissions = ippermissions)\n printr(\"Adding new IP to group \"+sg)\n client.authorize_security_group_ingress(GroupName=sg, IpProtocol=\"-1\", FromPort=0, ToPort=0, CidrIp=ip+\"/32\")", "def engine_security_group_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"engine_security_group_id\")", "def allow_sgcmd(params, sg, protocol_str, cidr, egress, force):\n ec2 = get_ec2_connection()\n sg_obj = convert_sg_name_to_sg(sg)\n protocol, target_port_low, target_port_high = parse_protocol_port_string(protocol_str)\n if is_debugging:\n print(\"Protocol %s\" % protocol)\n print(\"Target port %s to %s\" % (target_port_low, target_port_high))\n print(\"Source ID %s\" % cidr)\n cidr = expand_cidr_string(cidr)\n if egress:\n sg_obj.authorize_egress(\n IpProtocol=protocol,\n FromPort=target_port_low,\n ToPort=target_port_high,\n CidrIp=cidr\n )\n else:\n sg_obj.authorize_ingress(\n IpProtocol=protocol,\n FromPort=target_port_low,\n ToPort=target_port_high,\n CidrIp=cidr\n )", "def _split_ns_by_scatter(cls,\n shard_count,\n namespace,\n raw_entity_kind,\n app):\n if shard_count == 1:\n\n return [key_range.KeyRange(namespace=namespace, _app=app)]\n\n ds_query = datastore.Query(kind=raw_entity_kind,\n namespace=namespace,\n _app=app,\n keys_only=True)\n ds_query.Order(\"__scatter__\")\n oversampling_factor = 32\n random_keys = ds_query.Get(shard_count * oversampling_factor)\n\n if not random_keys:\n\n\n return ([key_range.KeyRange(namespace=namespace, _app=app)] +\n [None] * (shard_count - 1))\n\n random_keys.sort()\n\n if len(random_keys) >= shard_count:\n\n random_keys = cls._choose_split_points(random_keys, shard_count)\n\n k_ranges = []\n\n k_ranges.append(key_range.KeyRange(\n key_start=None,\n key_end=random_keys[0],\n direction=key_range.KeyRange.ASC,\n include_start=False,\n include_end=False,\n namespace=namespace,\n _app=app))\n\n for i in range(0, len(random_keys) - 
1):\n k_ranges.append(key_range.KeyRange(\n key_start=random_keys[i],\n key_end=random_keys[i+1],\n direction=key_range.KeyRange.ASC,\n include_start=True,\n include_end=False,\n namespace=namespace,\n _app=app))\n\n k_ranges.append(key_range.KeyRange(\n key_start=random_keys[-1],\n key_end=None,\n direction=key_range.KeyRange.ASC,\n include_start=True,\n include_end=False,\n namespace=namespace,\n _app=app))\n\n if len(k_ranges) < shard_count:\n\n k_ranges += [None] * (shard_count - len(k_ranges))\n return k_ranges", "def load_security_groups(self):\n url = self.lookup(\"security_groups_url\")\n groups = self._fetcher.get_entities(url)\n if groups is None:\n return\n\n group_names = [group['name']\n for group in groups if group['running_default'] is False]\n # at this point the group_names contain all the running groups in addition\n # to the groups assigned to this space.\n # That's why we need to remove the duplicates\n group_names = list(set(group_names))\n\n for name in group_names:\n self._security_groups.append({'name': name})", "def add_secgroup(self, name=None, description=None):\n # print (\"UUUU\")\n if self.cloudman:\n if description is None:\n description = name\n try:\n self.cloudman.network.create_security_group(\n name=name,\n description=description)\n except:\n Console.warning(f\"secgroup {name} already exists in cloud. \"\n f\"skipping.\")\n else:\n raise ValueError(\"cloud not initialized\")", "def list_secgroups(self, name=None):\n groups = self.cloudman.network.security_groups()\n\n # print (\"TTTTT\")\n # for g in groups:\n # pprint(g)\n\n if name is not None:\n for entry in groups:\n\n if entry['name'] == name:\n groups = [entry]\n break\n\n return self.get_list(\n groups,\n kind=\"secgroup\")", "def dvs_instances_batch_mix_sg(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n self.show_step(2)\n net_1 = os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network is created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(3)\n self.show_step(4)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n self.icmp[\"security_group_rule\"][\"security_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"remote_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n self.icmp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n\n for sg 
in [sg1, sg2]:\n self.tcp[\"security_group_rule\"][\"security_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"remote_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n self.tcp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n\n # Add rules for ssh and ping\n os_conn.goodbye_security()\n _s_groups = os_conn.neutron.list_security_groups()['security_groups']\n _srv_tenant_id = os_conn.get_tenant(SERVTEST_TENANT).id\n default_sg = [sg for sg in _s_groups\n if sg['tenant_id'] == _srv_tenant_id and\n sg['name'] == 'default'][0]\n\n for step in (5, 9):\n self.show_step(step) # step 5, 9\n step += 1\n self.show_step(step) # step 6, 10\n step += 1\n\n openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=vm_count,\n security_groups=[sg1.name, sg2.name, default_sg['name']])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(step) # step 7, 11\n step += 1\n\n instances = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, instances)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair,\n timeout=60 * 5,\n interval=10)\n\n self.show_step(step) # step 8, 12\n step += 1\n\n for instance in instances:\n os_conn.nova.servers.delete(instance)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(instance)", "def is_sgw_ce_enabled(cluster_config):\n\n cluster = load_cluster_config_json(cluster_config)\n try:\n return cluster[\"environment\"][\"sg_ce\"]\n except KeyError:\n return False", "def test_patch_cluster_resource_quota(self):\n pass", "def test_intra_sg_isolation(self):\n # create a security group and make it loginable\n secgrp = self._create_security_group('secgrp')\n\n # remove all rules and add ICMP, DHCP and metadata as egress,\n # and ssh as ingress.\n for sgr in secgrp['security_group_rules']:\n self.client.delete_security_group_rule(sgr['id'])\n\n self.create_loginable_secgroup_rule(secgroup_id=secgrp['id'])\n rule_list = [{'direction': constants.EGRESS_DIRECTION,\n 'protocol': constants.PROTO_NAME_TCP,\n 'remote_ip_prefix': '169.254.169.254/32',\n 'description': 'metadata out',\n },\n {'direction': constants.EGRESS_DIRECTION,\n 'protocol': constants.PROTO_NAME_UDP,\n 'port_range_min': '67',\n 'port_range_max': '67',\n 'description': 'dhcpv4 out',\n },\n {'direction': constants.EGRESS_DIRECTION,\n 'protocol': constants.PROTO_NAME_ICMP,\n 'description': 'ping out',\n },\n ]\n self.create_secgroup_rules(rule_list, secgroup_id=secgrp['id'])\n\n # go vms, go!\n ssh_clients, fips, servers = self.create_vm_testing_sec_grp(\n num_servers=2,\n security_groups=[{'name': secgrp['name']}])\n\n # verify SSH functionality. This will ensure that servers were\n # able to reach dhcp + metadata servers\n for fip in fips:\n self.check_connectivity(fip['floating_ip_address'],\n CONF.validation.image_ssh_user,\n self.keypair['private_key'])\n\n # try to ping instances without intra SG permission (should fail)\n self.check_remote_connectivity(\n ssh_clients[0], fips[1]['fixed_ip_address'],\n should_succeed=False)\n self.check_remote_connectivity(\n ssh_clients[1], fips[0]['fixed_ip_address'],\n should_succeed=False)\n\n # add intra sg rule. 
This will allow packets from servers that\n # are in the same sg\n rule_list = [{'direction': constants.INGRESS_DIRECTION,\n 'remote_group_id': secgrp['id']}]\n self.create_secgroup_rules(rule_list, secgroup_id=secgrp['id'])\n\n # try to ping instances with intra SG permission\n self.check_remote_connectivity(\n ssh_clients[0], fips[1]['fixed_ip_address'])\n self.check_remote_connectivity(\n ssh_clients[1], fips[0]['fixed_ip_address'])", "def engine_security_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"engine_security_group_id\")", "def test_patch_cluster_policy(self):\n pass", "def security_group_id_for_domain_boundary(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"security_group_id_for_domain_boundary\")", "def modify_security_group_configuration(\n self,\n request: dds_20151201_models.ModifySecurityGroupConfigurationRequest,\n ) -> dds_20151201_models.ModifySecurityGroupConfigurationResponse:\n runtime = util_models.RuntimeOptions()\n return self.modify_security_group_configuration_with_options(request, runtime)", "def test_read_namespaced_applied_cluster_resource_quota(self):\n pass" ]
[ "0.6038478", "0.5949765", "0.5760521", "0.57362705", "0.56504565", "0.55500543", "0.5461943", "0.5421799", "0.54184514", "0.54021674", "0.5399247", "0.5391312", "0.53637403", "0.53541446", "0.5313645", "0.5262325", "0.5229808", "0.5227352", "0.5222983", "0.52090144", "0.51645213", "0.5156898", "0.5144462", "0.5139043", "0.5112529", "0.51054627", "0.5088506", "0.5086639", "0.50759476", "0.5059506", "0.5055603", "0.50483954", "0.5031053", "0.50303584", "0.50282776", "0.50231355", "0.50219303", "0.50201464", "0.49933556", "0.49889466", "0.49763265", "0.49709225", "0.4965626", "0.49438238", "0.49321052", "0.49239254", "0.49232367", "0.49133566", "0.48981646", "0.4896572", "0.48893136", "0.48820457", "0.48764816", "0.48635265", "0.4861763", "0.4857638", "0.4848043", "0.48446634", "0.48446634", "0.48336956", "0.48322532", "0.48256296", "0.4818407", "0.4813093", "0.4809709", "0.48063126", "0.48060122", "0.4804232", "0.47931102", "0.47930574", "0.47906664", "0.47900212", "0.4785427", "0.47841725", "0.4770986", "0.47692195", "0.4769016", "0.47670683", "0.47666472", "0.47656783", "0.47656783", "0.47557762", "0.47557762", "0.47549793", "0.474458", "0.474099", "0.4740735", "0.4735618", "0.47232208", "0.4722722", "0.47196797", "0.47187135", "0.4718074", "0.47133362", "0.47099268", "0.46878302", "0.46873716", "0.4685415", "0.46851218", "0.46838224", "0.46749285" ]
0.0
-1
> For a sharded cluster instance, the bound ECS security group takes effect only for mongos nodes.
async def modify_security_group_configuration_with_options_async( self, request: dds_20151201_models.ModifySecurityGroupConfigurationRequest, runtime: util_models.RuntimeOptions, ) -> dds_20151201_models.ModifySecurityGroupConfigurationResponse: UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.dbinstance_id): query['DBInstanceId'] = request.dbinstance_id if not UtilClient.is_unset(request.owner_account): query['OwnerAccount'] = request.owner_account if not UtilClient.is_unset(request.owner_id): query['OwnerId'] = request.owner_id if not UtilClient.is_unset(request.resource_owner_account): query['ResourceOwnerAccount'] = request.resource_owner_account if not UtilClient.is_unset(request.resource_owner_id): query['ResourceOwnerId'] = request.resource_owner_id if not UtilClient.is_unset(request.security_group_id): query['SecurityGroupId'] = request.security_group_id if not UtilClient.is_unset(request.security_token): query['SecurityToken'] = request.security_token req = open_api_models.OpenApiRequest( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='ModifySecurityGroupConfiguration', version='2015-12-01', protocol='HTTPS', pathname='/', method='POST', auth_type='AK', style='RPC', req_body_type='formData', body_type='json' ) return TeaCore.from_map( dds_20151201_models.ModifySecurityGroupConfigurationResponse(), await self.call_api_async(params, req, runtime) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def modify_rds_security_group(payload):\n # version = payload.get(\"version\")\n rds_ids = payload.pop(\"resource_id\")\n sg_ids = payload.pop(\"sg_id\")\n apply_action = \"GrantSecurityGroup\"\n remove_action = \"RemoveSecurityGroup\"\n check_instance_security_action = \"DescribeSecurityGroupByInstance\"\n version = payload.get(\"version\")\n result_data = {}\n\n succ, resp = get_ha_rds_backend_instance_info(payload)\n if not succ:\n return resp\n rds_2_instance = {rds: instance for instance, rds in resp.items()}\n\n if len(sg_ids) > 1:\n return console_response(\n SecurityErrorCode.ONE_SECURITY_PER_INSTANCE_ERROR, \"modify failed\")\n sg_id = sg_ids[0]\n\n code = 0\n msg = 'Success'\n for rds_id in rds_ids:\n sg_results_succ = []\n sg = RdsSecurityGroupModel.get_security_by_id(sg_id=sg_id)\n sg_uuid = sg.uuid\n visible_rds_record = RdsModel.get_rds_by_id(rds_id=rds_id)\n if visible_rds_record.rds_type == 'ha':\n rds_group = visible_rds_record.rds_group\n rds_records = RdsModel.get_rds_records_by_group(rds_group)\n else:\n rds_records = []\n for rds_record in rds_records:\n rds_ins_uuid = rds_2_instance.get(rds_record.uuid)\n\n payload.update(\n {\"action\": check_instance_security_action, \"version\": version,\n \"server\": rds_ins_uuid})\n # check_resp = api.get(payload=payload, timeout=10)\n check_resp = api.get(payload=payload)\n if check_resp.get(\"code\") != 0:\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = check_resp.get(\"msg\")\n continue\n\n # if the instance already has a security group, remove it\n if check_resp[\"data\"][\"total_count\"] > 0:\n old_sg_uuid = check_resp[\"data\"][\"ret_set\"][0][\"id\"]\n payload.update({\"action\": remove_action, \"version\": version,\n \"server\": rds_ins_uuid,\n \"security_group\": old_sg_uuid})\n # remove_resp = api.get(payload=payload, timeout=10)\n remove_resp = api.get(payload=payload)\n if remove_resp.get(\"code\") != 0:\n logger.debug(\"the resp of removing the old securty group is:\" +\n str(remove_resp))\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = remove_resp.get(\"msg\")\n continue\n else:\n rds_record.sg = None\n rds_record.save()\n\n # grant the new security group to the instance\n payload.update({\"action\": apply_action, \"version\": version,\n \"server\": rds_ins_uuid, \"security_group\": sg_uuid})\n # grant_resp = api.get(payload=payload, timeout=10)\n grant_resp = api.get(payload=payload)\n\n if grant_resp.get(\"code\") != 0:\n logger.debug(\"the resp of granting the new securty group is:\" +\n str(grant_resp))\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = grant_resp.get(\"msg\")\n logger.error(\n \"security_group with sg_id \" + sg_id +\n \" cannot apply to rds with rds_id \" + rds_id)\n else:\n try:\n rds_record.sg = RdsSecurityGroupModel.\\\n get_security_by_id(sg_id)\n rds_record.save()\n except Exception as exp:\n logger.error(\"cannot save grant sg to rds to db, {}\".\n format(exp.message))\n else:\n sg_results_succ.append(sg_id)\n result_data.update({rds_id: sg_results_succ})\n resp = console_response(code, msg, len(result_data.keys()), [result_data])\n return resp", "def update_cluster_security_group(ec2, cluster_props):\n vpc = ec2.Vpc(id=cluster_props['VpcId'])\n\n # The first Security group should be the default one\n defaultSg = list(vpc.security_groups.all())[0]\n print(\"Default Security group:\", defaultSg)\n\n # Authorize access\n try:\n defaultSg.authorize_ingress(GroupName=defaultSg.group_name,\n CidrIp='0.0.0.0/0',\n IpProtocol='TCP',\n FromPort=int(DWH_PORT),\n ToPort=int(DWH_PORT)\n 
)\n print(\"Access authorized\")\n except botocore.exceptions.ClientError as e:\n print(\"ClientError:\", e)\n except Exception as e:\n print(\"Error:\", e)", "def _set_security_group(client, instance_id_list, security_groups):\n logging.info('Setting the security group of instances.')\n for instance_id in instance_id_list:\n client.modify_instance_attribute(InstanceId=instance_id, Groups=security_groups)", "def grant_grp_self_access ( ec2_conn, grp, start_port, end_port, protocol = 'tcp' ) :\n if not does_grp_rule_exist( grp, grp, start_port, end_port, protocol ) :\n grp.authorize( ip_protocol = protocol,\n from_port = start_port,\n to_port = end_port,\n src_group = grp )\n ec2_conn.authorize_security_group_egress( group_id = grp.id,\n ip_protocol = protocol,\n from_port = start_port,\n to_port = end_port,\n src_group_id = grp.id )", "def _test_overlapping_sec_grp_rules(self):\n initial_security_groups = []\n if self.stateless_sg:\n md_secgrp = self._create_security_group('metadata_secgrp')\n self.create_ingress_metadata_secgroup_rule(\n secgroup_id=md_secgrp['id'])\n initial_security_groups.append(\n {'name': md_secgrp['name']})\n client_ssh, _, vms = self.create_vm_testing_sec_grp(\n num_servers=2, security_groups=initial_security_groups)\n tmp_ssh, _, tmp_vm = self.create_vm_testing_sec_grp(\n num_servers=1, security_groups=initial_security_groups)\n srv_ssh = tmp_ssh[0]\n srv_vm = tmp_vm[0]\n srv_port = self.client.list_ports(network_id=self.network['id'],\n device_id=srv_vm['server']['id'])['ports'][0]\n srv_ip = srv_port['fixed_ips'][0]['ip_address']\n secgrps = []\n for i, vm in enumerate(vms):\n sg = self._create_security_group('secgrp-%d' % i)\n self.create_loginable_secgroup_rule(secgroup_id=sg['id'])\n port = self.client.list_ports(network_id=self.network['id'],\n device_id=vm['server']['id'])['ports'][0]\n self.client.update_port(port['id'], security_groups=[sg['id']])\n secgrps.append(sg)\n tcp_port = 3000\n rule_list = [{'protocol': constants.PROTO_NUM_TCP,\n 'direction': constants.INGRESS_DIRECTION,\n 'port_range_min': tcp_port,\n 'port_range_max': tcp_port,\n 'remote_group_id': secgrps[0]['id']},\n {'protocol': constants.PROTO_NUM_TCP,\n 'direction': constants.INGRESS_DIRECTION,\n 'port_range_min': tcp_port,\n 'port_range_max': tcp_port + 2,\n 'remote_group_id': secgrps[1]['id']}]\n self.client.update_port(srv_port['id'],\n security_groups=[secgrps[0]['id'], secgrps[1]['id']])\n self.create_secgroup_rules(rule_list, secgroup_id=secgrps[0]['id'])\n\n if self.stateless_sg:\n # NOTE(slaweq): in case of stateless SG, client needs to have also\n # rule which will explicitly accept ingress TCP connections which\n # will be replies from the TCP server so it will use random\n # destination port (depends on the src port choosen by client while\n # establishing connection)\n self.create_security_group_rule(\n security_group_id=secgrps[0]['id'],\n protocol=constants.PROTO_NAME_TCP,\n direction=constants.INGRESS_DIRECTION)\n self.create_security_group_rule(\n security_group_id=secgrps[1]['id'],\n protocol=constants.PROTO_NAME_TCP,\n direction=constants.INGRESS_DIRECTION)\n\n # The conntrack entries are ruled by the OF definitions but conntrack\n # status can change the datapath. 
Let's check the rules in two\n # attempts\n for _ in range(2):\n with utils.StatefulConnection(\n client_ssh[0], srv_ssh, srv_ip, tcp_port) as con:\n con.test_connection()\n for port in range(tcp_port, tcp_port + 3):\n with utils.StatefulConnection(\n client_ssh[1], srv_ssh, srv_ip, port) as con:\n con.test_connection()", "def request_access_to_groups(self, ceph):\n for ceph_group in (\"volumes\", \"images\", \"vms\"):\n ceph.request_access_to_group(\n name=ceph_group,\n object_prefix_permissions={\"class-read\": [\"rbd_children\"]},\n permission=\"rwx\",\n )", "def test_aws_service_api_vm_security_group_put(self):\n pass", "def dvs_port_security_group(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n self.show_step(2)\n instances = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n security_groups=[security_group.name])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(3)\n access_point, access_point_ip = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n security_groups=[security_group.name])\n\n ips = [os_conn.get_nova_instance_ip(i, net_name=self.inter_net_name)\n for i in instances]\n ip_pair = dict.fromkeys([access_point_ip])\n for key in ip_pair:\n ip_pair[key] = ips\n openstack.check_connection_vms(ip_pair)\n\n self.show_step(4)\n ips = []\n for instance in instances:\n port = os_conn.neutron.create_port({\n \"port\": {\n \"network_id\": default_net.id,\n \"device_id\": instance.id\n }})['port']\n ips.append(port['fixed_ips'][0]['ip_address'])\n\n self.show_step(5)\n for key in ip_pair:\n ip_pair[key] = ips\n openstack.check_connection_vms(ip_pair, result_of_command=1)", "def check_security_group(self):\n return True", "def grant_grp_access ( ec2_conn, incoming_grps, tgt_grp, port, protocol = 'tcp' ) :\n for grp in incoming_grps :\n if not does_grp_rule_exist( tgt_grp, grp, port, port, protocol ) :\n tgt_grp.authorize( ip_protocol = protocol,\n from_port = port,\n to_port = port,\n src_group = grp )\n ec2_conn.authorize_security_group_egress( group_id = grp.id,\n ip_protocol = protocol,\n from_port = port,\n to_port = port,\n src_group_id = tgt_grp.id )", "def test_aws_service_api_security_groups_get(self):\n pass", "def process_security_group ( ec2_conn, vpc, base_name, params, secgrp_type = None, secgrp_description = None ) :\n if not secgrp_type :\n secgrp_type = params[ 'type' ]\n if not secgrp_description :\n secgrp_description = params[ 'description' ]\n\n secgrp = find_group( ec2_conn, base_name, secgrp_type )\n if not secgrp :\n if params.get( 'create', 'NO' ) == 'YES' :\n secgrp_name = get_secgrp_name( base_name, secgrp_type )\n print \"Creating security group with name: \" + secgrp_name\n secgrp = create_secgrp( ec2_conn, vpc, secgrp_name, secgrp_description )\n else :\n print \"ERROR: Could not find group with name \" + get_secgrp_name( base_name, secgrp_type )\n sys.exit( 1 )\n\n print \"Prepping rules for security group \" + secgrp.name\n remove_all_rules( ec2_conn, [ secgrp ], True, base_name )\n\n # Reload group to retrieve new object with no rules.\n 
secgrp = find_group( ec2_conn, base_name, secgrp_type )\n\n is_public = params.get( 'public' ) == 'YES'\n if is_public :\n nat_secgrp = None\n if params.get( 'os-update' ) == 'YES' :\n ec2_conn.authorize_security_group_egress( group_id = secgrp.id,\n ip_protocol = \"tcp\",\n from_port = 80,\n to_port = 80,\n cidr_ip = all_ip_cidr )\n if params.get( 'public-tcp-ports' ) :\n public_ports = params[ 'public-tcp-ports' ]\n for port in public_ports :\n secgrp.authorize( ip_protocol = \"tcp\",\n from_port = port,\n to_port = port,\n cidr_ip = all_ip_cidr )\n\n if params.get( 'incoming-cidr-rules' ) :\n for incoming_rule in params[ 'incoming-cidr-rules' ] :\n start_port = incoming_rule.get( 'port' )\n end_port = start_port\n\n protocol = get_secgrp_protocol_param( incoming_rule )\n cidr_list = get_cidr_param( incoming_rule[ 'cidr' ] )\n\n secgrp.authorize( ip_protocol = protocol,\n from_port = start_port,\n to_port = end_port,\n cidr_ip = cidr_list )\n\n else :\n nat_secgrp = find_group( ec2_conn, base_name, 'NAT' )\n # Grant NAT access to login to the machine\n if not nat_secgrp :\n print \"ERROR: Could not find NAT security group!\"\n sys.exit( 1 )\n grant_ssh_access( ec2_conn, [ secgrp ], nat_secgrp )\n if params.get( 'os-update' ) == 'YES' :\n grant_cidr_access( ec2_conn, all_ip_cidr, [ secgrp ], 80, nat_secgrp )\n # Need to reload secgrp so it contains latest rules\n secgrp = find_group( ec2_conn, base_name, secgrp_type )\n # Need to reload NAT secgrp so it contains latest rules\n nat_secgrp = find_group( ec2_conn, base_name, 'NAT' )\n\n if params.get( 'outgoing-cidr-rules' ) :\n for outgoing_rule in params[ 'outgoing-cidr-rules' ] :\n start_port = outgoing_rule.get( 'port' )\n end_port = start_port\n protocol = get_secgrp_protocol_param( outgoing_rule )\n cidr_list = get_cidr_param( outgoing_rule[ 'cidr' ] )\n\n for cidr in cidr_list :\n grant_cidr_access( ec2_conn, cidr, [ secgrp ], start_port, nat_secgrp, protocol )\n\n if params.get( 'outgoing-group-rules' ) :\n for outgoing_rule in params[ 'outgoing-group-rules' ] :\n start_port = outgoing_rule.get( 'port' )\n end_port = start_port\n protocol = get_secgrp_protocol_param( outgoing_rule )\n target_secgrp_type = outgoing_rule[ 'group-type' ]\n target_secgrp = find_group( ec2_conn, base_name, target_secgrp_type )\n grant_grp_access( ec2_conn, [ secgrp ], target_secgrp, start_port, protocol )\n \n if params.get( 'incoming-group-rules' ) :\n for incoming_rule in params[ 'incoming-group-rules' ] :\n start_port = incoming_rule.get( 'port' )\n end_port = start_port\n protocol = get_secgrp_protocol_param( incoming_rule )\n incoming_secgrp_type = incoming_rule[ 'group-type' ]\n incoming_secgrp = find_group( ec2_conn, base_name, incoming_secgrp_type )\n grant_grp_access( ec2_conn, [ incoming_secgrp ], secgrp, start_port, protocol )\n\n if params.get( 'self-rules' ) :\n for self_rule in params[ 'self-rules' ] :\n start_port = self_rule.get( 'port' )\n end_port = start_port\n\n if not start_port :\n start_port = self_rule[ 'port-range' ][ 'start' ]\n end_port = self_rule[ 'port-range' ][ 'end' ]\n\n protocol = get_secgrp_protocol_param( self_rule )\n\n grant_grp_self_access( ec2_conn, secgrp, start_port, end_port, protocol )\n\n # Reload the security group so it contains all the latest rules\n secgrp = find_group( ec2_conn, base_name, secgrp_type )\n return ( secgrp_type, secgrp )", "def auth_secgroup(self, args):\n region = args[\"Region\"]\n sgid = args[\"Security-group-ID\"]\n protocol = args[\"protocol\"]\n from_port = int(args[\"FromPort\"])\n 
to_port = int(args[\"ToPort\"])\n ip_range = args[\"IpRange\"]\n message = MessageClass()\n\n # Boto3 client creation by providing the access_id and access_secret\n ec2 = boto3.client(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n\n data = ec2.authorize_security_group_ingress(\n GroupId=sgid,\n IpPermissions=[\n {'IpProtocol': protocol,\n 'FromPort': from_port,\n 'ToPort': to_port,\n 'IpRanges': [{'CidrIp': ip_range}]}\n ])\n attachment = MessageAttachmentsClass()\n attachment.title = data\n message.message_text = \"Ingress Successfully Set :\"\n message.attach(attachment)\n\n return message.to_json()", "def is_secured_cluster(self, services):\n return services and \"cluster-env\" in services[\"configurations\"] and\\\n \"security_enabled\" in services[\"configurations\"][\"cluster-env\"][\"properties\"] and\\\n services[\"configurations\"][\"cluster-env\"][\"properties\"][\"security_enabled\"].lower() == \"true\"", "def test_break_security_group_usual_case_specify_sg():", "def find_secgrp ( ec2_conn, secgrp_name ) :\n sec_grps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ secgrp_name ] } )\n if len( sec_grps ) > 0 :\n return sec_grps[ 0 ]\n \n return None", "def install_mongo ( vpc_conn, ec2_conn, cloudwatch_conn, vpc, base_name, aws_account_type, base_topicarn, params ):\n app_type = 'MONGO'\n mongo_keypair = get_keypair_name( aws_account_type, vpc.region.name, app_type )\n print mongo_keypair\n mongo_sec_grp_name = get_secgrp_name( base_name, app_type )\n \n print \"mongo_sec_grp_name\" + mongo_sec_grp_name\n \n mongo_sec_grp = find_secgrp(ec2_conn, mongo_sec_grp_name)\n print mongo_sec_grp\n \n if not mongo_sec_grp :\n mongo_sec_grp = create_secgrp( ec2_conn,\n vpc,\n mongo_sec_grp_name,\n 'Controls access to the ' + app_type )\n print mongo_sec_grp \n \n remove_all_rules( ec2_conn, [ mongo_sec_grp ],deep = True,base_name = base_name)\n grant_ssh_access( ec2_conn, [ mongo_sec_grp ], find_group( ec2_conn, base_name, 'NAT' ) )\n grant_grp_self_access ( ec2_conn, mongo_sec_grp, 0, 65535, protocol = 'tcp' )\n \n mongo_ami_name = base_name + '-' + app_type\n \n mongo_ami = get_ami_by_name( ec2_conn, mongo_ami_name )\n if not mongo_ami :\n mongo_ami = get_ami_by_name( ec2_conn, default_mongo_ami )\n\n print \"Creating mongoDB Instances\"\n \n mongo_subnets = get_vpc_subnets( vpc_conn, vpc, 'STATIC' )\n mongo_sec_grps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ mongo_sec_grp_name ] } )\n \n primary_ip = get_primary_node(mongo_subnets)\n mongo_instances = []\n mongo_config = get_mongo_config(mongo_subnets)\n \n user_data = None\n primary_instance_id = None\n primary_subnet = None\n for subnet in mongo_subnets :\n cidr_block = subnet.cidr_block\n \n if cidr_block == primary_ip :\n primary_subnet = subnet\n \n # First launch the secondary instances\n if cidr_block != primary_ip :\n instance_private_ip = get_static_ip(subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo )\n\n 
print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )\n\n user_data = base64.b64encode(mongo_config)\n \n print \"Launching primary mongo instance\"\n print \"primary_subnet \" +primary_subnet.cidr_block\n instance_private_ip = get_static_ip(primary_subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = primary_subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo)\n \n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )", "def handle_region(self, region, args):\n result = [CHECKMARK, str(region), \"created security group '{}'\".format(GROUP_NAME)]\n\n try:\n # Create the security group\n response = region.conn.create_security_group(\n Description='Security group for Alia replicas and clients.',\n GroupName=GROUP_NAME,\n )\n\n # Get the newly created group id\n group_id = response[\"GroupId\"]\n\n # Allow all network traffic from within the security group\n response = region.conn.authorize_security_group_ingress(\n GroupId = group_id,\n IpPermissions = [\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 0, \"ToPort\": 65535,\n \"UserIdGroupPairs\": [\n {\n \"GroupId\": group_id,\n \"Description\": \"allow all traffic from the same group\",\n }\n ]\n }\n ]\n )\n\n # Open Alia-specific ports for access\n reponse = region.conn.authorize_security_group_ingress(\n GroupId = group_id,\n IpPermissions = [\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 22, \"ToPort\": 22,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"allow remote SSH access\"\n }\n ]\n },\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 3264, \"ToPort\": 3285,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"external Alia service access\",\n }\n ],\n \"Ipv6Ranges\": [\n {\n \"CidrIpv6\": \"::/0\",\n \"Description\": \"external Alia service IPv6 access\"\n }\n ]\n },\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 5356, \"ToPort\": 5356,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"research services access\"\n }\n ]\n },\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 4157, \"ToPort\": 4157,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"master services access\",\n }\n ]\n },\n ]\n )\n\n\n except Exception as e:\n result[0] = CROSSMARK\n result[2] = str(e)\n\n\n return result", "def do_add_security_group(cs, args):\n opts = {}\n opts['id'] = args.container\n opts['security_group'] = args.security_group\n opts = zun_utils.remove_null_parms(**opts)\n try:\n cs.containers.add_security_group(**opts)\n print(\"Request to add security group for container %s \"\n \"has been accepted.\" % args.container)\n except Exception as e:\n print(\"Add security group for container %(container)s \"\n \"failed: %(e)s\" % {'container': args.container, 'e': e})", "def create_sec_group(ec2, sec_group_name):\n sec = ec2.create_security_group(sec_group_name, 'Jvivian Boto SecGroup')\n port = 22\n sec.authorize('tcp', port, port, '0.0.0.0/0')", "def list_secgroups(self, name=None):", "def 
change_instance_security_groups(instance_id, security_group_ids):\n\n # Retrieve the IDs of the network interfaces attached to the EC2 instance\n ec2_client = boto3.client('ec2')\n try:\n response = ec2_client.describe_instances(InstanceIds=[instance_id])\n except ClientError as e:\n logging.error(e)\n return False\n instance_info = response['Reservations'][0]['Instances'][0]\n\n # Assign the security groups to each network interface\n for network_interface in instance_info['NetworkInterfaces']:\n try:\n ec2_client.modify_network_interface_attribute(\n NetworkInterfaceId=network_interface['NetworkInterfaceId'],\n Groups=security_group_ids)\n except ClientError as e:\n logging.error(e)\n return False\n return True", "def rule_40_can_create_sg(session):\n\n def try_create(session, side):\n res, conn_vpc = session[\"config\"][side][\"res\"], session[\"conn\"][side](\"vpc\")\n subnet = conn_vpc.get_all_subnets([res[\"subnet_id\"]])[0]\n\n try:\n conn_vpc.create_security_group(\n \"foo\", \"bar\", vpc_id = subnet.vpc_id, dry_run = True)\n except EC2ResponseError as e:\n if 412 != e.status:\n raise e\n\n try_create(session, \"server\")\n try_create(session, \"client\")\n\n return True", "def test_modify_storage_group_srdf_set_consistency_enable(self):\n if not self.run_consistency_enable_check():\n self.skipTest(\n 'Skip test_modify_storage_group_srdf_set_consistency_enable '\n 'This fix is in V9.2.1.7')\n sg_name, srdf_group_number, local_volume, remote_volume = (\n self.create_rdf_sg())\n self.replication.modify_storage_group_srdf(\n storage_group_id=sg_name, action='setmode',\n srdf_group_number=srdf_group_number,\n options={'setMode': {'mode': 'Asynchronous'}})\n status = self.replication.modify_storage_group_srdf(\n storage_group_id=sg_name, srdf_group_number=srdf_group_number,\n action=\"EnableConsistency\")\n self.assertEqual('Enabled', status.get('consistency_protection'))\n disable_status = self.replication.modify_storage_group_srdf(\n storage_group_id=sg_name, srdf_group_number=srdf_group_number,\n action=\"DisableConsistency\")\n self.assertEqual(\n 'Disabled', disable_status.get('consistency_protection'))", "def cluster(self):\n assert False", "def enableRemoteAccess():\n myClusterProps = redshift.describe_clusters(\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER\n )['Clusters'][0]\n\n # attempt to allow incoming TCP connections to the cluster\n print(\"Enabling incoming TCP connections...\")\n try:\n vpc = ec2.Vpc(id=myClusterProps[\"VpcId\"])\n for sg in vpc.security_groups.all():\n if sg.group_name == \"default\":\n defaultSg = sg\n break\n\n defaultSg.authorize_ingress(\n GroupName=defaultSg.group_name,\n CidrIp=CIDR_IP_ALLOWED,\n IpProtocol='TCP',\n FromPort=int(DWH_PORT),\n ToPort=int(DWH_PORT)\n )\n except Exception as e:\n print(e)\n return\n\n print(\"\\nCluster is ready for access!\")\n endpoint = myClusterProps[\"Endpoint\"][\"Address\"]\n print(f\"Endpoint: {endpoint}\")", "def test_patch_cluster_role(self):\n pass", "def get_lb_secgrp_name ( base_name, app_name ) :\n return get_secgrp_name( base_name, get_lb_secgrp_type( app_name ) )", "def test_secgroup_propagation_local_override(self):\n\n # Needs love - not idempotent\n unit = self.compute_sentry\n if self._get_openstack_release() >= self.trusty_mitaka:\n conf = \"/etc/neutron/plugins/ml2/openvswitch_agent.ini\"\n else:\n conf = \"/etc/neutron/plugins/ml2/ml2_conf.ini\"\n self.d.configure('neutron-api', {'neutron-security-groups': 'True'})\n self.d.configure('neutron-openvswitch',\n {'disable-security-groups': 'True'})\n 
self._wait_and_check()\n ret = u.validate_config_data(unit, conf, 'securitygroup',\n {'enable_security_group': 'False'})\n msg = \"Propagation error, expected %s=%s\" % ('enable_security_group',\n 'False')\n self.process_ret(ret=ret, message=msg)\n self.d.configure('neutron-openvswitch',\n {'disable-security-groups': 'False'})\n self.d.configure('neutron-api', {'neutron-security-groups': 'True'})\n self._wait_and_check()\n ret = u.validate_config_data(unit, conf, 'securitygroup',\n {'enable_security_group': 'True'})", "def test_replace_cluster_role(self):\n pass", "def test_positive_cgroups(self):\n # Test parsing \"cpuset.cpus\" file\n self.assertEqual(self.computer._manager_list[0]._cpu_id_list(), self.cpu_list)\n # This should created per-cpu groups and move all tasks in CPU pool into cpu0\n self.computer.format(alter_network=False, alter_user=False)\n # Test files creation for exclusive CPUs\n for cpu_id in self.cpu_list:\n cpu_n_path = os.path.join(self.cpuset_path, \"cpu\" + str(cpu_id))\n self.assertEqual(str(cpu_id), file_content(os.path.join(cpu_n_path, \"cpuset.cpus\")))\n self.assertEqual(\"1\", file_content(os.path.join(cpu_n_path, \"cpuset.cpu_exclusive\")))\n if cpu_id > 0:\n self.assertEqual(\"\", file_content(os.path.join(cpu_n_path, \"tasks\")))\n\n # Test moving tasks from generic core to private core\n # request PID 1001 to be moved to its private CPU\n request_file_path = os.path.join(self.computer.partition_list[0].path,\n slapos.manager.cpuset.Manager.cpu_exclusive_file)\n file_write(\"1001\\n\", request_file_path)\n # Simulate slapos instance call to perform the actual movement\n self.computer._manager_list[0].instance(\n SlapGridPartitionMock(self.computer.partition_list[0]))\n # Simulate cgroup behaviour - empty tasks in the pool\n file_write(\"\", os.path.join(self.cpuset_path, \"tasks\"))\n # Test that format moved all PIDs from CPU pool into CPU0\n tasks_at_cpu0 = file_content(os.path.join(self.cpuset_path, \"cpu0\", \"tasks\")).split()\n self.assertIn(\"1000\", tasks_at_cpu0)\n # test if the moving suceeded into any provate CPUS (id>0)\n self.assertTrue(any(\"1001\" in file_content(exclusive_task)\n for exclusive_task in glob.glob(os.path.join(self.cpuset_path, \"cpu[1-9]\", \"tasks\"))))\n self.assertIn(\"1002\", tasks_at_cpu0)\n # slapformat should remove successfully moved PIDs from the .slapos-cpu-exclusive file\n self.assertEqual(\"\", file_content(request_file_path).strip())", "def capacitygroup_group():", "def do_remove_security_group(cs, args):\n opts = {}\n opts['id'] = args.container\n opts['security_group'] = args.security_group\n opts = zun_utils.remove_null_parms(**opts)\n try:\n cs.containers.remove_security_group(**opts)\n print(\"Request to remove security group for container %s \"\n \"has been accepted.\" % args.container)\n except Exception as e:\n print(\"Remove security group for container %(container)s \"\n \"failed: %(e)s\" % {'container': args.container, 'e': e})", "def auth_secgroupeg(self, args):\n region = args[\"Region\"]\n sgid = args[\"Security-group-ID\"]\n protocol = args[\"protocol\"]\n from_port = int(args[\"FromPort\"])\n to_port = int(args[\"ToPort\"])\n ip_range = args[\"IpRange\"]\n message = MessageClass()\n\n # Boto3 client creation by providing the access_id and access_secret\n ec2 = boto3.client(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n 
config=None)\n\n data = ec2.authorize_security_group_egress(\n GroupId=sgid,\n IpPermissions=[\n {'IpProtocol': protocol,\n 'FromPort': from_port,\n 'ToPort': to_port,\n 'IpRanges': [{'CidrIp': ip_range}]}\n ])\n attachment = MessageAttachmentsClass()\n attachment.title = data\n message.message_text = \"Egress Successfully Set :\"\n message.attach(attachment)\n\n return message.to_json()", "def create_groups(self, role):\n security_group_names = self._get_all_group_names()\n\n cluster_group_name = self.get_cluster_group_name()\n if not cluster_group_name in security_group_names:\n self.ec2Connection.create_security_group(cluster_group_name, \"Hadoop cluster (%s)\" % (self.name))\n self.ec2Connection.authorize_security_group(cluster_group_name, cluster_group_name)\n # Allow SSH from anywhere\n self.ec2Connection.authorize_security_group(cluster_group_name, ip_protocol=\"tcp\", from_port=22, to_port=22, cidr_ip=\"0.0.0.0/0\")\n\n role_group_name = self.group_name_for_role(role)\n if not role_group_name in security_group_names:\n self.ec2Connection.create_security_group(role_group_name, \"Hadoop %s (%s)\" % (role, self.name))", "def _set_app_security_group(self, security_group):\n pass", "def test_replace_cluster_resource_quota(self):\n pass", "def createSG(ec2,name,rules):\n\t# check if the security group exists\n\tgroup = None\n\tsgGroups = [sg for sg in ec2.get_all_security_groups() if sg.name == name]\n\tif sgGroups:\n\t\tgroup = sgGroups[0]\n\t\tec2.delete_security_group(name=name, group_id=group)\t\n\tprint \"Creating %s Security Group\" % name\n\tgroup = ec2.create_security_group(name, 'group for %s' % name)\n\tif group:\n\t\t# Set the inbound rules\n\t\tfor rule in rules:\n\t\t\tif rule.src_group_name:\n\t\t\t\tgroup.authorize(ip_protocol=rule.ip_protocol,from_port=rule.from_port,to_port=rule.to_port,cidr_ip=rule.cidr_ip,src_group=group)\n\t\t\telse:\n\t\t\t\tgroup.authorize(ip_protocol=rule.ip_protocol,from_port=rule.from_port,to_port=rule.to_port,cidr_ip=rule.cidr_ip,src_group=None)\n\t\treturn True\n\telse:\n\t\tlogError('Error during '+name+' Security Group update')\n\t\treturn False", "def authorize_cluster_access(IpAddress='0.0.0.0/0'):\n\n ec2_client = boto3.client('ec2')\n\n # Redshift uses port 5439 by default. 
If Redshift was configured to use\n # a different port, specify the FromPort= and ToPort= arguments accordingly.\n try:\n ec2_client.authorize_security_group_ingress(GroupName='default',\n IpProtocol='tcp',\n FromPort=5439,\n ToPort=5439,\n CidrIp=IpAddress)\n except ClientError as e:\n print(f'ERROR: {e}')\n return False\n return True", "def remove_rds_security_group(payload):\n rds_ids = payload.pop(\"resource_id\")\n sg_ids = payload.pop(\"sg_id\")\n action = payload.get(\"action\")\n version = payload.get(\"version\")\n result_data = {}\n\n code = 0\n msg = 'Success'\n\n succ, resp = get_ha_rds_backend_instance_info(payload)\n if not succ:\n return resp\n rds_2_instance = {rds: instance for instance, rds in resp.items()}\n\n for rds_id in rds_ids:\n sg_results_succ = []\n for sg_id in sg_ids:\n sg = RdsSecurityGroupModel.get_security_by_id(sg_id=sg_id)\n sg_uuid = sg.uuid\n visible_rds_record = RdsModel.get_rds_by_id(rds_id=rds_id)\n if visible_rds_record.rds_type == 'ha':\n rds_group = visible_rds_record.rds_group\n rds_records = RdsModel.get_rds_records_by_group(rds_group)\n else:\n rds_records = []\n for rds_record in rds_records:\n rds_ins_uuid = rds_2_instance.get(rds_record.uuid)\n payload.update({\"server\": rds_ins_uuid})\n payload.update({\"security_group\": sg_uuid})\n payload.update({\"version\": version})\n payload.update({\"action\": action})\n # resp = api.get(payload=payload, timeout=10)\n resp = api.get(payload=payload)\n\n if resp.get(\"code\") != 0:\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = resp.get(\"msg\")\n # sg_results.update({sg_id: \"failed\"})\n logger.error(\n \"security_group with sg_id \" + sg_id +\n \" cannot remove from rds with rds_id %s\" + rds_id)\n else:\n try:\n rds_record.sg = None\n rds_record.save()\n except Exception as exp:\n logger.error(\"save removal of security group to db fail,\"\n \"{}\".format(exp.message))\n else:\n sg_results_succ.append(sg_id)\n result_data.update({rds_id: sg_results_succ})\n resp = console_response(code, msg, len(result_data.keys()), [result_data])\n return resp", "def tag_instance_security_group(self, tags):\n self._request({\"instance-security-group-tags\": dict(tags)})", "def security_group(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"security_group\")", "def dvs_instances_one_group(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(2)\n self.show_step(3)\n openstack.create_instances(os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n vm_count=vm_count,\n security_groups=[security_group.name])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(4)\n srv_list = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, srv_list)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair)\n\n 
self.show_step(5)\n for srv in srv_list:\n os_conn.nova.servers.delete(srv)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(srv)", "def app_security_group_management(self) -> pulumi.Output[Optional['DomainAppSecurityGroupManagement']]:\n return pulumi.get(self, \"app_security_group_management\")", "def sg_rule_sets_by_rds(rds, ec2, account, region, output_bucket):\n \"\"\"generate list of rds instances\"\"\"\n rds_list = rds.describe_db_instances().get('DBInstances')\n\n \"\"\"generate list of security groups to get rule set details\"\"\"\n sg_list = ec2.describe_security_groups().get('SecurityGroups')\n\n for sg_obj in sg_list:\n \"\"\"find out how many rdss are using a security group\"\"\"\n for rds_obj in rds_list:\n for rdssg in rds_obj.get('VpcSecurityGroups'):\n \"\"\"check if security group is associated to rds instance\"\"\"\n if sg_obj.get('GroupId') == rdssg.get('VpcSecurityGroupId'):\n \n \"\"\"move on to rule entries\"\"\"\n for rule in sg_obj.get('IpPermissions'):\n \"\"\"cidr as source\"\"\"\n for cidr in rule.get('IpRanges'):\n if cidr.get('CidrIp'):\n output_bucket.append(misc.format_line((\n misc.check_if(account.get('name')),\n misc.check_if(region.get('RegionName')),\n misc.check_if(rds_obj.get('DBSubnetGroup').get('VpcId')),\n misc.check_if(rds_obj.get('DBInstanceIdentifier')),\n misc.check_if(str(rds_obj.get('PubliclyAccessible'))),\n misc.check_if(rds_obj.get('Endpoint').get('Address')),\n misc.check_if(str(rds_obj.get('Endpoint').get('Port'))),\n misc.check_if(sg_obj.get('GroupId')),\n misc.check_if(sg_obj.get('GroupName')),\n misc.check_if(str(cidr.get('CidrIp'))),\n misc.check_if(str(check_port(rule.get('FromPort')))),\n misc.check_if(str(check_port(rule.get('ToPort')))),\n misc.check_if(str(check_proto(rule.get('IpProtocol'))))\n )))\n\n \"\"\"security groups as source\"\"\"\n for group in rule.get('UserIdGroupPairs'):\n if group.get('GroupId'):\n output_bucket.append(misc.format_line((\n misc.check_if(account.get('name')),\n misc.check_if(region.get('RegionName')),\n misc.check_if(rds_obj.get('DBSubnetGroup').get('VpcId')),\n misc.check_if(rds_obj.get('DBInstanceIdentifier')),\n misc.check_if(str(rds_obj.get('PubliclyAccessible'))),\n misc.check_if(rds_obj.get('Endpoint').get('Address')),\n misc.check_if(str(rds_obj.get('Endpoint').get('Port'))),\n misc.check_if(sg_obj.get('GroupId')),\n misc.check_if(sg_obj.get('GroupName')),\n misc.check_if(group.get('GroupId')),\n misc.check_if(str(check_port(rule.get('FromPort')))),\n misc.check_if(str(check_port(rule.get('ToPort')))),\n misc.check_if(str(check_proto(rule.get('IpProtocol'))))\n )))", "def find_group ( ec2_conn, base_name, group_type ) :\n secgrp = None\n secgrps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ get_secgrp_name( base_name, group_type ) ] } )\n for s in secgrps :\n secgrp = s\n break\n\n return secgrp", "def security_group_update(secgroup=None, auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(keep_name=True, **kwargs)\n return cloud.update_security_group(secgroup, **kwargs)", "def get_lb_secgrp_type ( app_name ) :\n return app_name.upper( ) + '-LB'", "def reserve_group(self, name, sco):\n\t\tif self.client is None:\n\t\t\traise UsageError(\"Not connected!\")\n\t\treturn self.client.reserve_group(name, sco)", "def ensure_security_groups_created(vpc, environment):\n conglomerate_name = environment + '-conglomerate'\n load_balancer_name = environment + '-load-balancer'\n\n existing = 
vpc.security_groups.filter(Filters=[\n { 'Name': 'group-name', 'Values': [ conglomerate_name, load_balancer_name ] }\n ])\n ret = {}\n for security_group in existing:\n if security_group.group_name == conglomerate_name:\n ret['conglomerate'] = security_group\n elif security_group.group_name == load_balancer_name:\n ret['load-balancer'] = security_group\n else:\n raise Exception(\"Unexpected security group name: \" + security_group.group_name)\n\n if not ret['conglomerate']:\n # untested\n ret['conglomerate'] = vpc.create_security_group(\n GroupName=conglomerate_name,\n Description=conglomerate_name\n )\n if not ret['load-balancer']:\n # untested\n ret['load-balancer'] = vpc.create_security_group(\n GroupName=load_balancer_name,\n Description=load_balancer_name\n )\n\n try:\n ret['conglomerate'].authorize_ingress(IpPermissions=[\n { 'IpProtocol': 'icmp', 'FromPort': 0, 'ToPort': 255, 'IpRanges': [ { 'CidrIp': '0.0.0.0/0' } ] },\n { 'IpProtocol': 'tcp', 'FromPort': 9000, 'ToPort': 9000, 'UserIdGroupPairs': [ { 'GroupId': ret['load-balancer'].id } ] },\n ])\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] != 'InvalidPermission.Duplicate':\n raise e\n\n try:\n ret['load-balancer'].authorize_ingress(IpPermissions=[\n { 'IpProtocol': 'tcp', 'FromPort': 80, 'ToPort': 80 },\n { 'IpProtocol': 'tcp', 'FromPort': 443, 'ToPort': 443 },\n { 'IpProtocol': 'icmp', 'FromPort': 0, 'ToPort': 255, 'IpRanges': [ { 'CidrIp': '0.0.0.0/0' } ] },\n { 'IpProtocol': 'tcp', 'FromPort': 1024, 'ToPort': 65535, 'IpRanges': [ { 'CidrIp': Constants['VpcCidr'] } ] },\n ])\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] != 'InvalidPermission.Duplicate':\n raise e\n\n return ret", "def security_groups(self):\n return int(self.get('security_group_rules'))", "def app_security_group_management(self) -> Optional[pulumi.Input['DomainAppSecurityGroupManagement']]:\n return pulumi.get(self, \"app_security_group_management\")", "def update_instance_security_group(self, instance_id,\n new_security_group_ids):\n ports = port_list(self.request, device_id=instance_id)\n for p in ports:\n params = {'security_groups': new_security_group_ids}\n port_update(self.request, p.id, **params)", "def prevent_forking_outside_group(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"prevent_forking_outside_group\")", "def _Delete(self):\n cmd = self.cmd_prefix + [\n 'redshift', 'delete-cluster-subnet-group',\n '--cluster-subnet-group-name', self.name\n ]\n vm_util.IssueCommand(cmd, raise_on_failure=False)", "def get_sg():\n client = boto3.client('ec2')\n all_instances = client.describe_instances()\n all_sg = client.describe_security_groups()\n\n instance_sg_set = set()\n sg_set = set()\n\n for reservation in all_instances[\"Reservations\"]:\n for instance in reservation[\"Instances\"]:\n for sg in instance[\"SecurityGroups\"]:\n instance_sg_set.add(sg[\"GroupName\"])\n\n for security_group in all_sg[\"SecurityGroups\"]:\n sg_set.add(security_group[\"GroupName\"])\n\n idle_sg = sg_set - instance_sg_set\n\n return idle_sg", "def dvs_remote_sg_simple(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n # Create security group with rules for ssh and ping\n security_group = 
os_conn.create_sec_group_for_ssh()\n\n self.show_step(2)\n net_1 = os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network are created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n self.show_step(3)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n self.show_step(4)\n _sg_rules = os_conn.neutron.list_security_group_rules()\n sg_rules = [sg_rule for sg_rule in _sg_rules['security_group_rules']\n if sg_rule['security_group_id'] in [sg1.id, sg2.id]]\n for rule in sg_rules:\n os_conn.neutron.delete_security_group_rule(rule['id'])\n\n self.show_step(5)\n self.show_step(6)\n for sg in [sg1, sg2]:\n for rule in [self.icmp, self.tcp]:\n rule[\"security_group_rule\"][\"security_group_id\"] = sg.id\n rule[\"security_group_rule\"][\"remote_group_id\"] = sg.id\n rule[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(rule)\n rule[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(rule)\n\n # Create access_point to instances from SG1 and SG2\n _, access_point_ip = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n security_groups=[security_group.name, sg1.name, sg2.name])\n\n self.show_step(7)\n istances_sg1 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[sg1.name])\n\n self.show_step(8)\n istances_sg2 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[sg2.name])\n openstack.verify_instance_state(os_conn)\n\n # Get private ips of instances\n ips = {\n 'SG1': [os_conn.get_nova_instance_ip(i, net_name=net_1['name'])\n for i in istances_sg1],\n 'SG2': [os_conn.get_nova_instance_ip(i, net_name=net_1['name'])\n for i in istances_sg2]\n }\n\n self.show_step(9)\n self.show_step(10)\n for group in ips:\n ip_pair = dict.fromkeys(ips[group])\n for key in ip_pair:\n ip_pair[key] = [value for value in ips[group] if key != value]\n openstack.check_connection_through_host(\n access_point_ip, ip_pair, timeout=60 * 5)\n\n self.show_step(11)\n ip_pair = dict.fromkeys(ips['SG1'])\n for key in ip_pair:\n ip_pair[key] = ips['SG2']\n openstack.check_connection_through_host(\n access_point_ip, ip_pair, result_of_command=1, timeout=60 * 5)", "def security_group_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"security_group_id\")", "def security_group_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"security_group_id\")", "def AddSecurityGroupEntry(self, security_group, host=None, port=None):\n if self._conn:\n security_groups = self._conn.get_all_security_groups(groupnames=security_group)\n for sg in security_groups:\n if sg.name == security_group:\n return self._conn.authorize_security_group(sg.name, ip_protocol='tcp', from_port=port, to_port=port, cidr_ip='%s/32' % host)", "def _generate_ec2_instance_and_sg(resource):\n for instance in resource.instances.all():\n for 
security_group in instance.security_groups:\n yield instance, security_group", "def privilegedStartService(self):\n super(GroupOwnedUNIXServer, self).privilegedStartService()\n\n # Unfortunately, there's no public way to access this. -glyph\n fileName = self._port.port\n chown(fileName, getuid(), self.gid)", "def test_replace_cluster_policy(self):\n pass", "def _get_sg_name(sg_name, session):\n return session.resource(\"ec2\").SecurityGroup(sg_name).group_name", "def save_security_group(resp, payload):\n if resp.get(\"code\") != 0:\n return None, SaveDataError(\"Create security group failed\")\n uuid = resp[\"data\"][\"ret_set\"][0][\"id\"]\n name = payload.get(\"description\")\n sg_id = payload.get(\"sg_id\")\n zone_name = payload.get(\"zone\")\n user_name = payload.get(\"owner\")\n zone = ZoneModel.get_zone_by_name(zone_name)\n user = User.objects.get(username=user_name)\n _security_group_ins, err = RdsSecurityGroupModel.objects.create(uuid,\n sg_id,\n name,\n zone,\n user)\n return _security_group_ins, err", "def create_sg(vpc_id, description, group_name):\n client = boto3.client('ec2')\n security_group = str(group_name + \"_sg\")\n\n # get the security groups\n idle_sg = get_sg()\n\n print(idle_sg)\n print(security_group)\n\n # if security group doesnt exist, create it\n if security_group not in idle_sg:\n print(\"Creating SG\")\n return client.create_security_group(\n Description=description,\n GroupName=security_group,\n VpcId=vpc_id\n )\n return get_sg_id(security_group)", "def test_break_security_group_usual_case():", "def create(self, body: CloudSecurityGroup) -> Dict:\n\t\treturn self._post(route=AWSSecurityGroupConsts.CLOUD_SECURITY_GROUP.value, body=body)", "def sp_cpu_exclusive_on(cpuset_info):\n if cpuset_info['storpool.slice']['cpuset.cpu_exclusive'] != 1:\n return ['cpuset:storpool.slice cpu_exclusive is not 1'], []\n return [], []", "def test_break_security_group_failed():", "def _test_multiple_ports_secgroup_inheritance(self):\n # create a security group and make it loginable and pingable\n secgrp = self._create_security_group('secgrp')\n self.create_loginable_secgroup_rule(\n secgroup_id=secgrp['id'])\n self.create_pingable_secgroup_rule(\n secgroup_id=secgrp['id'])\n if self.stateless_sg:\n self.create_ingress_metadata_secgroup_rule(\n secgroup_id=secgrp['id'])\n # create two ports with fixed IPs and the security group created\n ports = []\n for i in range(2):\n ports.append(self.create_port(\n self.network, fixed_ips=[{'subnet_id': self.subnets[0]['id']}],\n security_groups=[secgrp['id']]))\n # spawn instances with the ports created\n server_ssh_clients, fips, servers = self.create_vm_testing_sec_grp(\n ports=ports)\n # verify ICMP reachability and ssh connectivity\n for fip in fips:\n self.ping_ip_address(fip['floating_ip_address'])\n self.check_connectivity(fip['floating_ip_address'],\n CONF.validation.image_ssh_user,\n self.keypair['private_key'])", "def authorize(self, cidr_ip=None, ec2_group=None):\r\n if isinstance(ec2_group, SecurityGroup):\r\n group_name = ec2_group.name\r\n group_owner_id = ec2_group.owner_id\r\n else:\r\n group_name = None\r\n group_owner_id = None\r\n return self.connection.authorize_dbsecurity_group(self.name,\r\n cidr_ip,\r\n group_name,\r\n group_owner_id)", "def prod_load_balancer_sg_valid(self) -> None:\n if self.prod_env:\n sg_name = 'saints-xctf-prod-server-elb-security-group'\n else:\n sg_name = 'saints-xctf-dev-server-elb-security-group'\n\n response = self.ec2.describe_security_groups(Filters=[\n {\n 'Name': 'group-name',\n 
'Values': [sg_name]\n }\n ])\n\n security_group = response.get('SecurityGroups')[0]\n\n self.assertTrue(all([\n security_group.get('GroupName') == sg_name,\n self.validate_load_balancer_sg_rules(\n security_group.get('IpPermissions'),\n security_group.get('IpPermissionsEgress')\n )\n ]))", "def create_security_group(group_name):\n ec2 = boto.ec2.connect_to_region(AWS_REGION)\n for g in ec2.get_all_security_groups():\n if g.name == group_name:\n return # We already have this group setup\n group = ec2.create_security_group(group_name,\n \"%s SSH access group\" % group_name)\n group.authorize(\"tcp\", 22, 22, \"0.0.0.0/0\") # SSH is on port 22, all IPs\n group.authorize(\"tcp\", 80, 80, \"0.0.0.0/0\")\n group.authorize(\"tcp\", 61000, 65000, \"0.0.0.0/0\")\n print \"Created new security group\"", "def update_cluster_merge_across_nodes(request):\n ksm_merge_across_nodes = getattr(\n request.node.cls, \"ksm_merge_across_nodes\"\n )\n\n def fin():\n \"\"\"\n 1) Disable KSM\n \"\"\"\n ll_clusters.updateCluster(\n positive=True, cluster=sla_conf.CLUSTER_NAME[0], ksm_enabled=False\n )\n request.addfinalizer(fin)\n\n assert ll_clusters.updateCluster(\n positive=True,\n cluster=sla_conf.CLUSTER_NAME[0],\n ksm_enabled=True,\n ksm_merge_across_nodes=ksm_merge_across_nodes\n )", "def test_placement_group(ray_start_2_cpus):\n num_workers = 2\n bundle = {\"CPU\": 1}\n bundles = [bundle.copy() for _ in range(num_workers)]\n placement_group = ray.util.placement_group(bundles)\n wg = WorkerGroup(num_workers=num_workers, placement_group=placement_group)\n wg.remove_workers([0])\n wg.add_workers(1)", "def prevent_forking_outside_group(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"prevent_forking_outside_group\")", "def prevent_forking_outside_group(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"prevent_forking_outside_group\")", "def security_groups(self, oid):\n try:\n path = u'/servers/%s/os-security-groups' % oid\n res = self.client.call(path, u'GET', data=u'', \n token=self.manager.identity.token)\n self.logger.debug(u'Get openstack server security groups: %s' % truncate(res))\n return res[0][u'security_groups']\n except Exception as error:\n self.logger.error(error, exc_info=True)\n data = []\n return res", "def test_aws_service_api_vm_security_group_delete(self):\n pass", "def grant_ssh_access ( ec2_conn, tgt_grps, nat_grp ) :\n for grp in tgt_grps :\n grant_grp_access( ec2_conn, [ nat_grp ], grp, 22 )", "def test_replace_cluster_role_binding(self):\n pass", "def security_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"security_group_id\")", "def security_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"security_group_id\")", "def _remove_server_from_sgroup(**kwargs):\n # A10 Lightning APIs\n ServerGrpApi = \"applications/{0}/hosts/{1}/services/{2}/servergroups/{3}\".format(\n kwargs['applicationId'], kwargs['hostId'],\n kwargs['serviceId'], kwargs['serverGroupId'])\n \n ServerGrpPolicyApi = \"applications/{0}/hosts/{1}/services/{2}/servergroups/{3}/policies\".format(\n kwargs['applicationId'], kwargs['hostId'],\n kwargs['serviceId'], kwargs['serverGroupId'])\n \n ServerGrpImportApi = \"applications/{0}/hosts/{1}/services/{2}/servergroups/_import\".format(\n kwargs['applicationId'], kwargs['hostId'], kwargs['serviceId'])\n\n # Build the requests\n request1 = urllib2.Request(\n os.environ[\"API_BASE_URL\"] + ServerGrpApi)\n request2 = urllib2.Request(\n os.environ[\"API_BASE_URL\"] + ServerGrpPolicyApi)\n request3 = 
urllib2.Request(\n os.environ[\"API_BASE_URL\"] + ServerGrpImportApi)\n\n # Auth header\n cred = A10User + ':' + A10UserPassword\n bas64 = b64encode(bytes(cred))\n auth = \"Basic \" + bas64.decode(\"ascii\")\n \n # Complete header dict\n headers = {\n \"provider\": \"root\",\n \"tenant\": A10Tenant,\n \"Content-Type\": \"application/json\",\n \"Authorization\": auth\n }\n\n # Attach all the headers to the requests\n for key, value in headers.items():\n request1.add_header(key, value)\n request2.add_header(key, value)\n request3.add_header(key, value)\n\n # First retrieve the server group data\n response = urllib2.urlopen(request1)\n server_grp_data = json.loads(response.read().decode(\"utf-8\"))\n servers = server_grp_data['servers']\n \n # Remove the required server\n for serv in servers:\n if serv['ipAddress'] == _get_public_ip_addr(ServerInstanceID):\n servers.remove(serv)\n\n # Get server group policies\n response = urllib2.urlopen(request2)\n srv_policies = json.loads(response.read().decode(\"utf-8\"))\n \n # Add parsed server data and server group policies and post it\n server_grp_data['servers'] = servers\n server_grp_data['policies'] = srv_policies\n urllib2.urlopen(request3, json.dumps(server_grp_data).encode(\"utf-8\"))", "def engine_security_group_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"engine_security_group_id\")", "def sgup(sg=\"sg_external_ssh\"):\n ip = os.popen(\"/usr/bin/curl ifconfig.co 2>/dev/null\").readline().strip()\n print(\"My Public IP is : \"+ip)\n client = boto3.client(\"ec2\")\n ippermissions = client.describe_security_groups(GroupNames = [ sg ])[\"SecurityGroups\"][0][\"IpPermissions\"]\n print(\"Revoking old IP from group \"+sg)\n client.revoke_security_group_ingress(GroupName = sg, IpPermissions = ippermissions)\n printr(\"Adding new IP to group \"+sg)\n client.authorize_security_group_ingress(GroupName=sg, IpProtocol=\"-1\", FromPort=0, ToPort=0, CidrIp=ip+\"/32\")", "def allow_sgcmd(params, sg, protocol_str, cidr, egress, force):\n ec2 = get_ec2_connection()\n sg_obj = convert_sg_name_to_sg(sg)\n protocol, target_port_low, target_port_high = parse_protocol_port_string(protocol_str)\n if is_debugging:\n print(\"Protocol %s\" % protocol)\n print(\"Target port %s to %s\" % (target_port_low, target_port_high))\n print(\"Source ID %s\" % cidr)\n cidr = expand_cidr_string(cidr)\n if egress:\n sg_obj.authorize_egress(\n IpProtocol=protocol,\n FromPort=target_port_low,\n ToPort=target_port_high,\n CidrIp=cidr\n )\n else:\n sg_obj.authorize_ingress(\n IpProtocol=protocol,\n FromPort=target_port_low,\n ToPort=target_port_high,\n CidrIp=cidr\n )", "def _split_ns_by_scatter(cls,\n shard_count,\n namespace,\n raw_entity_kind,\n app):\n if shard_count == 1:\n\n return [key_range.KeyRange(namespace=namespace, _app=app)]\n\n ds_query = datastore.Query(kind=raw_entity_kind,\n namespace=namespace,\n _app=app,\n keys_only=True)\n ds_query.Order(\"__scatter__\")\n oversampling_factor = 32\n random_keys = ds_query.Get(shard_count * oversampling_factor)\n\n if not random_keys:\n\n\n return ([key_range.KeyRange(namespace=namespace, _app=app)] +\n [None] * (shard_count - 1))\n\n random_keys.sort()\n\n if len(random_keys) >= shard_count:\n\n random_keys = cls._choose_split_points(random_keys, shard_count)\n\n k_ranges = []\n\n k_ranges.append(key_range.KeyRange(\n key_start=None,\n key_end=random_keys[0],\n direction=key_range.KeyRange.ASC,\n include_start=False,\n include_end=False,\n namespace=namespace,\n _app=app))\n\n for i in range(0, len(random_keys) - 
1):\n k_ranges.append(key_range.KeyRange(\n key_start=random_keys[i],\n key_end=random_keys[i+1],\n direction=key_range.KeyRange.ASC,\n include_start=True,\n include_end=False,\n namespace=namespace,\n _app=app))\n\n k_ranges.append(key_range.KeyRange(\n key_start=random_keys[-1],\n key_end=None,\n direction=key_range.KeyRange.ASC,\n include_start=True,\n include_end=False,\n namespace=namespace,\n _app=app))\n\n if len(k_ranges) < shard_count:\n\n k_ranges += [None] * (shard_count - len(k_ranges))\n return k_ranges", "def load_security_groups(self):\n url = self.lookup(\"security_groups_url\")\n groups = self._fetcher.get_entities(url)\n if groups is None:\n return\n\n group_names = [group['name']\n for group in groups if group['running_default'] is False]\n # at this point the group_names contain all the running groups in addition\n # to the groups assigned to this space.\n # That's why we need to remove the duplicates\n group_names = list(set(group_names))\n\n for name in group_names:\n self._security_groups.append({'name': name})", "def list_secgroups(self, name=None):\n groups = self.cloudman.network.security_groups()\n\n # print (\"TTTTT\")\n # for g in groups:\n # pprint(g)\n\n if name is not None:\n for entry in groups:\n\n if entry['name'] == name:\n groups = [entry]\n break\n\n return self.get_list(\n groups,\n kind=\"secgroup\")", "def add_secgroup(self, name=None, description=None):\n # print (\"UUUU\")\n if self.cloudman:\n if description is None:\n description = name\n try:\n self.cloudman.network.create_security_group(\n name=name,\n description=description)\n except:\n Console.warning(f\"secgroup {name} already exists in cloud. \"\n f\"skipping.\")\n else:\n raise ValueError(\"cloud not initialized\")", "def dvs_instances_batch_mix_sg(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n self.show_step(2)\n net_1 = os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network is created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(3)\n self.show_step(4)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n self.icmp[\"security_group_rule\"][\"security_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"remote_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n self.icmp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n\n for sg 
in [sg1, sg2]:\n self.tcp[\"security_group_rule\"][\"security_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"remote_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n self.tcp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n\n # Add rules for ssh and ping\n os_conn.goodbye_security()\n _s_groups = os_conn.neutron.list_security_groups()['security_groups']\n _srv_tenant_id = os_conn.get_tenant(SERVTEST_TENANT).id\n default_sg = [sg for sg in _s_groups\n if sg['tenant_id'] == _srv_tenant_id and\n sg['name'] == 'default'][0]\n\n for step in (5, 9):\n self.show_step(step) # step 5, 9\n step += 1\n self.show_step(step) # step 6, 10\n step += 1\n\n openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=vm_count,\n security_groups=[sg1.name, sg2.name, default_sg['name']])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(step) # step 7, 11\n step += 1\n\n instances = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, instances)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair,\n timeout=60 * 5,\n interval=10)\n\n self.show_step(step) # step 8, 12\n step += 1\n\n for instance in instances:\n os_conn.nova.servers.delete(instance)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(instance)", "def is_sgw_ce_enabled(cluster_config):\n\n cluster = load_cluster_config_json(cluster_config)\n try:\n return cluster[\"environment\"][\"sg_ce\"]\n except KeyError:\n return False", "def test_patch_cluster_resource_quota(self):\n pass", "def engine_security_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"engine_security_group_id\")", "def security_group_id_for_domain_boundary(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"security_group_id_for_domain_boundary\")", "def test_intra_sg_isolation(self):\n # create a security group and make it loginable\n secgrp = self._create_security_group('secgrp')\n\n # remove all rules and add ICMP, DHCP and metadata as egress,\n # and ssh as ingress.\n for sgr in secgrp['security_group_rules']:\n self.client.delete_security_group_rule(sgr['id'])\n\n self.create_loginable_secgroup_rule(secgroup_id=secgrp['id'])\n rule_list = [{'direction': constants.EGRESS_DIRECTION,\n 'protocol': constants.PROTO_NAME_TCP,\n 'remote_ip_prefix': '169.254.169.254/32',\n 'description': 'metadata out',\n },\n {'direction': constants.EGRESS_DIRECTION,\n 'protocol': constants.PROTO_NAME_UDP,\n 'port_range_min': '67',\n 'port_range_max': '67',\n 'description': 'dhcpv4 out',\n },\n {'direction': constants.EGRESS_DIRECTION,\n 'protocol': constants.PROTO_NAME_ICMP,\n 'description': 'ping out',\n },\n ]\n self.create_secgroup_rules(rule_list, secgroup_id=secgrp['id'])\n\n # go vms, go!\n ssh_clients, fips, servers = self.create_vm_testing_sec_grp(\n num_servers=2,\n security_groups=[{'name': secgrp['name']}])\n\n # verify SSH functionality. 
This will ensure that servers were\n # able to reach dhcp + metadata servers\n for fip in fips:\n self.check_connectivity(fip['floating_ip_address'],\n CONF.validation.image_ssh_user,\n self.keypair['private_key'])\n\n # try to ping instances without intra SG permission (should fail)\n self.check_remote_connectivity(\n ssh_clients[0], fips[1]['fixed_ip_address'],\n should_succeed=False)\n self.check_remote_connectivity(\n ssh_clients[1], fips[0]['fixed_ip_address'],\n should_succeed=False)\n\n # add intra sg rule. This will allow packets from servers that\n # are in the same sg\n rule_list = [{'direction': constants.INGRESS_DIRECTION,\n 'remote_group_id': secgrp['id']}]\n self.create_secgroup_rules(rule_list, secgroup_id=secgrp['id'])\n\n # try to ping instances with intra SG permission\n self.check_remote_connectivity(\n ssh_clients[0], fips[1]['fixed_ip_address'])\n self.check_remote_connectivity(\n ssh_clients[1], fips[0]['fixed_ip_address'])", "def test_patch_cluster_policy(self):\n pass", "def modify_security_group_configuration(\n self,\n request: dds_20151201_models.ModifySecurityGroupConfigurationRequest,\n ) -> dds_20151201_models.ModifySecurityGroupConfigurationResponse:\n runtime = util_models.RuntimeOptions()\n return self.modify_security_group_configuration_with_options(request, runtime)", "def test_read_namespaced_applied_cluster_resource_quota(self):\n pass" ]
[ "0.6035774", "0.59480536", "0.57567054", "0.57343066", "0.56493175", "0.5547614", "0.54578465", "0.54200065", "0.5414869", "0.5399556", "0.5395597", "0.53883916", "0.5361524", "0.53523713", "0.5309428", "0.52605104", "0.52296615", "0.5224529", "0.52218825", "0.5206037", "0.5162893", "0.51536715", "0.51421016", "0.513549", "0.5112876", "0.5103925", "0.5087399", "0.5086236", "0.5072943", "0.5058302", "0.505382", "0.50472844", "0.50296694", "0.5028348", "0.5026149", "0.50191927", "0.50191814", "0.5018419", "0.49920923", "0.49864486", "0.4974171", "0.49688873", "0.4964521", "0.4942611", "0.49298775", "0.4921149", "0.49204907", "0.49132782", "0.48958763", "0.48951212", "0.48875028", "0.48806325", "0.4874548", "0.4863546", "0.4861836", "0.48564568", "0.4845741", "0.4843717", "0.4843717", "0.48323348", "0.48296392", "0.48243997", "0.4816572", "0.48121276", "0.48063892", "0.48029602", "0.48021924", "0.48012257", "0.4791362", "0.47893822", "0.47888604", "0.47872332", "0.4782896", "0.47807077", "0.4772008", "0.47688726", "0.4765585", "0.4765585", "0.47652832", "0.47648564", "0.47647193", "0.47548214", "0.47548172", "0.47548172", "0.47448653", "0.47401875", "0.47373405", "0.47333917", "0.47244626", "0.4720771", "0.4716853", "0.47166696", "0.47166342", "0.47126672", "0.47090077", "0.4686818", "0.46851662", "0.46849996", "0.4683744", "0.46822238", "0.46744525" ]
0.0
-1
> For a sharded cluster instance, the bound ECS security group takes effect only for mongos nodes.
def modify_security_group_configuration(
    self,
    request: dds_20151201_models.ModifySecurityGroupConfigurationRequest,
) -> dds_20151201_models.ModifySecurityGroupConfigurationResponse:
    runtime = util_models.RuntimeOptions()
    return self.modify_security_group_configuration_with_options(request, runtime)
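A minimal, hypothetical usage sketch for the method above: the module paths, client class, endpoint value, and the request attributes (dbinstance_id, security_group_id) follow the usual conventions of the generated Alibaba Cloud Python SDK and are assumptions rather than values taken from this file; all IDs and credentials are placeholders.

# Hypothetical example: client class path, endpoint, and request attribute
# names are assumed from standard generated-SDK conventions, not this file.
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_dds20151201.client import Client as DdsClient

config = open_api_models.Config(
    access_key_id='<access-key-id>',          # placeholder credential
    access_key_secret='<access-key-secret>',  # placeholder credential
    endpoint='mongodb.aliyuncs.com',          # assumed service endpoint
)
client = DdsClient(config)

# Bind an ECS security group to an instance. For a sharded cluster instance,
# the group takes effect only on the mongos nodes, as the note above states.
request = dds_20151201_models.ModifySecurityGroupConfigurationRequest(
    dbinstance_id='dds-bp1**************',    # placeholder instance ID
    security_group_id='sg-bp1*************',  # placeholder security group ID
)
response = client.modify_security_group_configuration(request)
print(response.body)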
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def modify_rds_security_group(payload):\n # version = payload.get(\"version\")\n rds_ids = payload.pop(\"resource_id\")\n sg_ids = payload.pop(\"sg_id\")\n apply_action = \"GrantSecurityGroup\"\n remove_action = \"RemoveSecurityGroup\"\n check_instance_security_action = \"DescribeSecurityGroupByInstance\"\n version = payload.get(\"version\")\n result_data = {}\n\n succ, resp = get_ha_rds_backend_instance_info(payload)\n if not succ:\n return resp\n rds_2_instance = {rds: instance for instance, rds in resp.items()}\n\n if len(sg_ids) > 1:\n return console_response(\n SecurityErrorCode.ONE_SECURITY_PER_INSTANCE_ERROR, \"modify failed\")\n sg_id = sg_ids[0]\n\n code = 0\n msg = 'Success'\n for rds_id in rds_ids:\n sg_results_succ = []\n sg = RdsSecurityGroupModel.get_security_by_id(sg_id=sg_id)\n sg_uuid = sg.uuid\n visible_rds_record = RdsModel.get_rds_by_id(rds_id=rds_id)\n if visible_rds_record.rds_type == 'ha':\n rds_group = visible_rds_record.rds_group\n rds_records = RdsModel.get_rds_records_by_group(rds_group)\n else:\n rds_records = []\n for rds_record in rds_records:\n rds_ins_uuid = rds_2_instance.get(rds_record.uuid)\n\n payload.update(\n {\"action\": check_instance_security_action, \"version\": version,\n \"server\": rds_ins_uuid})\n # check_resp = api.get(payload=payload, timeout=10)\n check_resp = api.get(payload=payload)\n if check_resp.get(\"code\") != 0:\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = check_resp.get(\"msg\")\n continue\n\n # if the instance already has a security group, remove it\n if check_resp[\"data\"][\"total_count\"] > 0:\n old_sg_uuid = check_resp[\"data\"][\"ret_set\"][0][\"id\"]\n payload.update({\"action\": remove_action, \"version\": version,\n \"server\": rds_ins_uuid,\n \"security_group\": old_sg_uuid})\n # remove_resp = api.get(payload=payload, timeout=10)\n remove_resp = api.get(payload=payload)\n if remove_resp.get(\"code\") != 0:\n logger.debug(\"the resp of removing the old securty group is:\" +\n str(remove_resp))\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = remove_resp.get(\"msg\")\n continue\n else:\n rds_record.sg = None\n rds_record.save()\n\n # grant the new security group to the instance\n payload.update({\"action\": apply_action, \"version\": version,\n \"server\": rds_ins_uuid, \"security_group\": sg_uuid})\n # grant_resp = api.get(payload=payload, timeout=10)\n grant_resp = api.get(payload=payload)\n\n if grant_resp.get(\"code\") != 0:\n logger.debug(\"the resp of granting the new securty group is:\" +\n str(grant_resp))\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = grant_resp.get(\"msg\")\n logger.error(\n \"security_group with sg_id \" + sg_id +\n \" cannot apply to rds with rds_id \" + rds_id)\n else:\n try:\n rds_record.sg = RdsSecurityGroupModel.\\\n get_security_by_id(sg_id)\n rds_record.save()\n except Exception as exp:\n logger.error(\"cannot save grant sg to rds to db, {}\".\n format(exp.message))\n else:\n sg_results_succ.append(sg_id)\n result_data.update({rds_id: sg_results_succ})\n resp = console_response(code, msg, len(result_data.keys()), [result_data])\n return resp", "def update_cluster_security_group(ec2, cluster_props):\n vpc = ec2.Vpc(id=cluster_props['VpcId'])\n\n # The first Security group should be the default one\n defaultSg = list(vpc.security_groups.all())[0]\n print(\"Default Security group:\", defaultSg)\n\n # Authorize access\n try:\n defaultSg.authorize_ingress(GroupName=defaultSg.group_name,\n CidrIp='0.0.0.0/0',\n IpProtocol='TCP',\n FromPort=int(DWH_PORT),\n ToPort=int(DWH_PORT)\n 
)\n print(\"Access authorized\")\n except botocore.exceptions.ClientError as e:\n print(\"ClientError:\", e)\n except Exception as e:\n print(\"Error:\", e)", "def _set_security_group(client, instance_id_list, security_groups):\n logging.info('Setting the security group of instances.')\n for instance_id in instance_id_list:\n client.modify_instance_attribute(InstanceId=instance_id, Groups=security_groups)", "def grant_grp_self_access ( ec2_conn, grp, start_port, end_port, protocol = 'tcp' ) :\n if not does_grp_rule_exist( grp, grp, start_port, end_port, protocol ) :\n grp.authorize( ip_protocol = protocol,\n from_port = start_port,\n to_port = end_port,\n src_group = grp )\n ec2_conn.authorize_security_group_egress( group_id = grp.id,\n ip_protocol = protocol,\n from_port = start_port,\n to_port = end_port,\n src_group_id = grp.id )", "def _test_overlapping_sec_grp_rules(self):\n initial_security_groups = []\n if self.stateless_sg:\n md_secgrp = self._create_security_group('metadata_secgrp')\n self.create_ingress_metadata_secgroup_rule(\n secgroup_id=md_secgrp['id'])\n initial_security_groups.append(\n {'name': md_secgrp['name']})\n client_ssh, _, vms = self.create_vm_testing_sec_grp(\n num_servers=2, security_groups=initial_security_groups)\n tmp_ssh, _, tmp_vm = self.create_vm_testing_sec_grp(\n num_servers=1, security_groups=initial_security_groups)\n srv_ssh = tmp_ssh[0]\n srv_vm = tmp_vm[0]\n srv_port = self.client.list_ports(network_id=self.network['id'],\n device_id=srv_vm['server']['id'])['ports'][0]\n srv_ip = srv_port['fixed_ips'][0]['ip_address']\n secgrps = []\n for i, vm in enumerate(vms):\n sg = self._create_security_group('secgrp-%d' % i)\n self.create_loginable_secgroup_rule(secgroup_id=sg['id'])\n port = self.client.list_ports(network_id=self.network['id'],\n device_id=vm['server']['id'])['ports'][0]\n self.client.update_port(port['id'], security_groups=[sg['id']])\n secgrps.append(sg)\n tcp_port = 3000\n rule_list = [{'protocol': constants.PROTO_NUM_TCP,\n 'direction': constants.INGRESS_DIRECTION,\n 'port_range_min': tcp_port,\n 'port_range_max': tcp_port,\n 'remote_group_id': secgrps[0]['id']},\n {'protocol': constants.PROTO_NUM_TCP,\n 'direction': constants.INGRESS_DIRECTION,\n 'port_range_min': tcp_port,\n 'port_range_max': tcp_port + 2,\n 'remote_group_id': secgrps[1]['id']}]\n self.client.update_port(srv_port['id'],\n security_groups=[secgrps[0]['id'], secgrps[1]['id']])\n self.create_secgroup_rules(rule_list, secgroup_id=secgrps[0]['id'])\n\n if self.stateless_sg:\n # NOTE(slaweq): in case of stateless SG, client needs to have also\n # rule which will explicitly accept ingress TCP connections which\n # will be replies from the TCP server so it will use random\n # destination port (depends on the src port choosen by client while\n # establishing connection)\n self.create_security_group_rule(\n security_group_id=secgrps[0]['id'],\n protocol=constants.PROTO_NAME_TCP,\n direction=constants.INGRESS_DIRECTION)\n self.create_security_group_rule(\n security_group_id=secgrps[1]['id'],\n protocol=constants.PROTO_NAME_TCP,\n direction=constants.INGRESS_DIRECTION)\n\n # The conntrack entries are ruled by the OF definitions but conntrack\n # status can change the datapath. 
Let's check the rules in two\n # attempts\n for _ in range(2):\n with utils.StatefulConnection(\n client_ssh[0], srv_ssh, srv_ip, tcp_port) as con:\n con.test_connection()\n for port in range(tcp_port, tcp_port + 3):\n with utils.StatefulConnection(\n client_ssh[1], srv_ssh, srv_ip, port) as con:\n con.test_connection()", "def request_access_to_groups(self, ceph):\n for ceph_group in (\"volumes\", \"images\", \"vms\"):\n ceph.request_access_to_group(\n name=ceph_group,\n object_prefix_permissions={\"class-read\": [\"rbd_children\"]},\n permission=\"rwx\",\n )", "def test_aws_service_api_vm_security_group_put(self):\n pass", "def dvs_port_security_group(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n self.show_step(2)\n instances = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n security_groups=[security_group.name])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(3)\n access_point, access_point_ip = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n security_groups=[security_group.name])\n\n ips = [os_conn.get_nova_instance_ip(i, net_name=self.inter_net_name)\n for i in instances]\n ip_pair = dict.fromkeys([access_point_ip])\n for key in ip_pair:\n ip_pair[key] = ips\n openstack.check_connection_vms(ip_pair)\n\n self.show_step(4)\n ips = []\n for instance in instances:\n port = os_conn.neutron.create_port({\n \"port\": {\n \"network_id\": default_net.id,\n \"device_id\": instance.id\n }})['port']\n ips.append(port['fixed_ips'][0]['ip_address'])\n\n self.show_step(5)\n for key in ip_pair:\n ip_pair[key] = ips\n openstack.check_connection_vms(ip_pair, result_of_command=1)", "def check_security_group(self):\n return True", "def grant_grp_access ( ec2_conn, incoming_grps, tgt_grp, port, protocol = 'tcp' ) :\n for grp in incoming_grps :\n if not does_grp_rule_exist( tgt_grp, grp, port, port, protocol ) :\n tgt_grp.authorize( ip_protocol = protocol,\n from_port = port,\n to_port = port,\n src_group = grp )\n ec2_conn.authorize_security_group_egress( group_id = grp.id,\n ip_protocol = protocol,\n from_port = port,\n to_port = port,\n src_group_id = tgt_grp.id )", "def test_aws_service_api_security_groups_get(self):\n pass", "def process_security_group ( ec2_conn, vpc, base_name, params, secgrp_type = None, secgrp_description = None ) :\n if not secgrp_type :\n secgrp_type = params[ 'type' ]\n if not secgrp_description :\n secgrp_description = params[ 'description' ]\n\n secgrp = find_group( ec2_conn, base_name, secgrp_type )\n if not secgrp :\n if params.get( 'create', 'NO' ) == 'YES' :\n secgrp_name = get_secgrp_name( base_name, secgrp_type )\n print \"Creating security group with name: \" + secgrp_name\n secgrp = create_secgrp( ec2_conn, vpc, secgrp_name, secgrp_description )\n else :\n print \"ERROR: Could not find group with name \" + get_secgrp_name( base_name, secgrp_type )\n sys.exit( 1 )\n\n print \"Prepping rules for security group \" + secgrp.name\n remove_all_rules( ec2_conn, [ secgrp ], True, base_name )\n\n # Reload group to retrieve new object with no rules.\n 
secgrp = find_group( ec2_conn, base_name, secgrp_type )\n\n is_public = params.get( 'public' ) == 'YES'\n if is_public :\n nat_secgrp = None\n if params.get( 'os-update' ) == 'YES' :\n ec2_conn.authorize_security_group_egress( group_id = secgrp.id,\n ip_protocol = \"tcp\",\n from_port = 80,\n to_port = 80,\n cidr_ip = all_ip_cidr )\n if params.get( 'public-tcp-ports' ) :\n public_ports = params[ 'public-tcp-ports' ]\n for port in public_ports :\n secgrp.authorize( ip_protocol = \"tcp\",\n from_port = port,\n to_port = port,\n cidr_ip = all_ip_cidr )\n\n if params.get( 'incoming-cidr-rules' ) :\n for incoming_rule in params[ 'incoming-cidr-rules' ] :\n start_port = incoming_rule.get( 'port' )\n end_port = start_port\n\n protocol = get_secgrp_protocol_param( incoming_rule )\n cidr_list = get_cidr_param( incoming_rule[ 'cidr' ] )\n\n secgrp.authorize( ip_protocol = protocol,\n from_port = start_port,\n to_port = end_port,\n cidr_ip = cidr_list )\n\n else :\n nat_secgrp = find_group( ec2_conn, base_name, 'NAT' )\n # Grant NAT access to login to the machine\n if not nat_secgrp :\n print \"ERROR: Could not find NAT security group!\"\n sys.exit( 1 )\n grant_ssh_access( ec2_conn, [ secgrp ], nat_secgrp )\n if params.get( 'os-update' ) == 'YES' :\n grant_cidr_access( ec2_conn, all_ip_cidr, [ secgrp ], 80, nat_secgrp )\n # Need to reload secgrp so it contains latest rules\n secgrp = find_group( ec2_conn, base_name, secgrp_type )\n # Need to reload NAT secgrp so it contains latest rules\n nat_secgrp = find_group( ec2_conn, base_name, 'NAT' )\n\n if params.get( 'outgoing-cidr-rules' ) :\n for outgoing_rule in params[ 'outgoing-cidr-rules' ] :\n start_port = outgoing_rule.get( 'port' )\n end_port = start_port\n protocol = get_secgrp_protocol_param( outgoing_rule )\n cidr_list = get_cidr_param( outgoing_rule[ 'cidr' ] )\n\n for cidr in cidr_list :\n grant_cidr_access( ec2_conn, cidr, [ secgrp ], start_port, nat_secgrp, protocol )\n\n if params.get( 'outgoing-group-rules' ) :\n for outgoing_rule in params[ 'outgoing-group-rules' ] :\n start_port = outgoing_rule.get( 'port' )\n end_port = start_port\n protocol = get_secgrp_protocol_param( outgoing_rule )\n target_secgrp_type = outgoing_rule[ 'group-type' ]\n target_secgrp = find_group( ec2_conn, base_name, target_secgrp_type )\n grant_grp_access( ec2_conn, [ secgrp ], target_secgrp, start_port, protocol )\n \n if params.get( 'incoming-group-rules' ) :\n for incoming_rule in params[ 'incoming-group-rules' ] :\n start_port = incoming_rule.get( 'port' )\n end_port = start_port\n protocol = get_secgrp_protocol_param( incoming_rule )\n incoming_secgrp_type = incoming_rule[ 'group-type' ]\n incoming_secgrp = find_group( ec2_conn, base_name, incoming_secgrp_type )\n grant_grp_access( ec2_conn, [ incoming_secgrp ], secgrp, start_port, protocol )\n\n if params.get( 'self-rules' ) :\n for self_rule in params[ 'self-rules' ] :\n start_port = self_rule.get( 'port' )\n end_port = start_port\n\n if not start_port :\n start_port = self_rule[ 'port-range' ][ 'start' ]\n end_port = self_rule[ 'port-range' ][ 'end' ]\n\n protocol = get_secgrp_protocol_param( self_rule )\n\n grant_grp_self_access( ec2_conn, secgrp, start_port, end_port, protocol )\n\n # Reload the security group so it contains all the latest rules\n secgrp = find_group( ec2_conn, base_name, secgrp_type )\n return ( secgrp_type, secgrp )", "def auth_secgroup(self, args):\n region = args[\"Region\"]\n sgid = args[\"Security-group-ID\"]\n protocol = args[\"protocol\"]\n from_port = int(args[\"FromPort\"])\n 
to_port = int(args[\"ToPort\"])\n ip_range = args[\"IpRange\"]\n message = MessageClass()\n\n # Boto3 client creation by providing the access_id and access_secret\n ec2 = boto3.client(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n\n data = ec2.authorize_security_group_ingress(\n GroupId=sgid,\n IpPermissions=[\n {'IpProtocol': protocol,\n 'FromPort': from_port,\n 'ToPort': to_port,\n 'IpRanges': [{'CidrIp': ip_range}]}\n ])\n attachment = MessageAttachmentsClass()\n attachment.title = data\n message.message_text = \"Ingress Successfully Set :\"\n message.attach(attachment)\n\n return message.to_json()", "def is_secured_cluster(self, services):\n return services and \"cluster-env\" in services[\"configurations\"] and\\\n \"security_enabled\" in services[\"configurations\"][\"cluster-env\"][\"properties\"] and\\\n services[\"configurations\"][\"cluster-env\"][\"properties\"][\"security_enabled\"].lower() == \"true\"", "def test_break_security_group_usual_case_specify_sg():", "def find_secgrp ( ec2_conn, secgrp_name ) :\n sec_grps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ secgrp_name ] } )\n if len( sec_grps ) > 0 :\n return sec_grps[ 0 ]\n \n return None", "def install_mongo ( vpc_conn, ec2_conn, cloudwatch_conn, vpc, base_name, aws_account_type, base_topicarn, params ):\n app_type = 'MONGO'\n mongo_keypair = get_keypair_name( aws_account_type, vpc.region.name, app_type )\n print mongo_keypair\n mongo_sec_grp_name = get_secgrp_name( base_name, app_type )\n \n print \"mongo_sec_grp_name\" + mongo_sec_grp_name\n \n mongo_sec_grp = find_secgrp(ec2_conn, mongo_sec_grp_name)\n print mongo_sec_grp\n \n if not mongo_sec_grp :\n mongo_sec_grp = create_secgrp( ec2_conn,\n vpc,\n mongo_sec_grp_name,\n 'Controls access to the ' + app_type )\n print mongo_sec_grp \n \n remove_all_rules( ec2_conn, [ mongo_sec_grp ],deep = True,base_name = base_name)\n grant_ssh_access( ec2_conn, [ mongo_sec_grp ], find_group( ec2_conn, base_name, 'NAT' ) )\n grant_grp_self_access ( ec2_conn, mongo_sec_grp, 0, 65535, protocol = 'tcp' )\n \n mongo_ami_name = base_name + '-' + app_type\n \n mongo_ami = get_ami_by_name( ec2_conn, mongo_ami_name )\n if not mongo_ami :\n mongo_ami = get_ami_by_name( ec2_conn, default_mongo_ami )\n\n print \"Creating mongoDB Instances\"\n \n mongo_subnets = get_vpc_subnets( vpc_conn, vpc, 'STATIC' )\n mongo_sec_grps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ mongo_sec_grp_name ] } )\n \n primary_ip = get_primary_node(mongo_subnets)\n mongo_instances = []\n mongo_config = get_mongo_config(mongo_subnets)\n \n user_data = None\n primary_instance_id = None\n primary_subnet = None\n for subnet in mongo_subnets :\n cidr_block = subnet.cidr_block\n \n if cidr_block == primary_ip :\n primary_subnet = subnet\n \n # First launch the secondary instances\n if cidr_block != primary_ip :\n instance_private_ip = get_static_ip(subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo )\n\n 
print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )\n\n user_data = base64.b64encode(mongo_config)\n \n print \"Launching primary mongo instance\"\n print \"primary_subnet \" +primary_subnet.cidr_block\n instance_private_ip = get_static_ip(primary_subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = primary_subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo)\n \n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )", "def handle_region(self, region, args):\n result = [CHECKMARK, str(region), \"created security group '{}'\".format(GROUP_NAME)]\n\n try:\n # Create the security group\n response = region.conn.create_security_group(\n Description='Security group for Alia replicas and clients.',\n GroupName=GROUP_NAME,\n )\n\n # Get the newly created group id\n group_id = response[\"GroupId\"]\n\n # Allow all network traffic from within the security group\n response = region.conn.authorize_security_group_ingress(\n GroupId = group_id,\n IpPermissions = [\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 0, \"ToPort\": 65535,\n \"UserIdGroupPairs\": [\n {\n \"GroupId\": group_id,\n \"Description\": \"allow all traffic from the same group\",\n }\n ]\n }\n ]\n )\n\n # Open Alia-specific ports for access\n reponse = region.conn.authorize_security_group_ingress(\n GroupId = group_id,\n IpPermissions = [\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 22, \"ToPort\": 22,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"allow remote SSH access\"\n }\n ]\n },\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 3264, \"ToPort\": 3285,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"external Alia service access\",\n }\n ],\n \"Ipv6Ranges\": [\n {\n \"CidrIpv6\": \"::/0\",\n \"Description\": \"external Alia service IPv6 access\"\n }\n ]\n },\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 5356, \"ToPort\": 5356,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"research services access\"\n }\n ]\n },\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 4157, \"ToPort\": 4157,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"master services access\",\n }\n ]\n },\n ]\n )\n\n\n except Exception as e:\n result[0] = CROSSMARK\n result[2] = str(e)\n\n\n return result", "def do_add_security_group(cs, args):\n opts = {}\n opts['id'] = args.container\n opts['security_group'] = args.security_group\n opts = zun_utils.remove_null_parms(**opts)\n try:\n cs.containers.add_security_group(**opts)\n print(\"Request to add security group for container %s \"\n \"has been accepted.\" % args.container)\n except Exception as e:\n print(\"Add security group for container %(container)s \"\n \"failed: %(e)s\" % {'container': args.container, 'e': e})", "def create_sec_group(ec2, sec_group_name):\n sec = ec2.create_security_group(sec_group_name, 'Jvivian Boto SecGroup')\n port = 22\n sec.authorize('tcp', port, port, '0.0.0.0/0')", "def list_secgroups(self, name=None):", "def 
change_instance_security_groups(instance_id, security_group_ids):\n\n # Retrieve the IDs of the network interfaces attached to the EC2 instance\n ec2_client = boto3.client('ec2')\n try:\n response = ec2_client.describe_instances(InstanceIds=[instance_id])\n except ClientError as e:\n logging.error(e)\n return False\n instance_info = response['Reservations'][0]['Instances'][0]\n\n # Assign the security groups to each network interface\n for network_interface in instance_info['NetworkInterfaces']:\n try:\n ec2_client.modify_network_interface_attribute(\n NetworkInterfaceId=network_interface['NetworkInterfaceId'],\n Groups=security_group_ids)\n except ClientError as e:\n logging.error(e)\n return False\n return True", "def rule_40_can_create_sg(session):\n\n def try_create(session, side):\n res, conn_vpc = session[\"config\"][side][\"res\"], session[\"conn\"][side](\"vpc\")\n subnet = conn_vpc.get_all_subnets([res[\"subnet_id\"]])[0]\n\n try:\n conn_vpc.create_security_group(\n \"foo\", \"bar\", vpc_id = subnet.vpc_id, dry_run = True)\n except EC2ResponseError as e:\n if 412 != e.status:\n raise e\n\n try_create(session, \"server\")\n try_create(session, \"client\")\n\n return True", "def test_modify_storage_group_srdf_set_consistency_enable(self):\n if not self.run_consistency_enable_check():\n self.skipTest(\n 'Skip test_modify_storage_group_srdf_set_consistency_enable '\n 'This fix is in V9.2.1.7')\n sg_name, srdf_group_number, local_volume, remote_volume = (\n self.create_rdf_sg())\n self.replication.modify_storage_group_srdf(\n storage_group_id=sg_name, action='setmode',\n srdf_group_number=srdf_group_number,\n options={'setMode': {'mode': 'Asynchronous'}})\n status = self.replication.modify_storage_group_srdf(\n storage_group_id=sg_name, srdf_group_number=srdf_group_number,\n action=\"EnableConsistency\")\n self.assertEqual('Enabled', status.get('consistency_protection'))\n disable_status = self.replication.modify_storage_group_srdf(\n storage_group_id=sg_name, srdf_group_number=srdf_group_number,\n action=\"DisableConsistency\")\n self.assertEqual(\n 'Disabled', disable_status.get('consistency_protection'))", "def cluster(self):\n assert False", "def enableRemoteAccess():\n myClusterProps = redshift.describe_clusters(\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER\n )['Clusters'][0]\n\n # attempt to allow incoming TCP connections to the cluster\n print(\"Enabling incoming TCP connections...\")\n try:\n vpc = ec2.Vpc(id=myClusterProps[\"VpcId\"])\n for sg in vpc.security_groups.all():\n if sg.group_name == \"default\":\n defaultSg = sg\n break\n\n defaultSg.authorize_ingress(\n GroupName=defaultSg.group_name,\n CidrIp=CIDR_IP_ALLOWED,\n IpProtocol='TCP',\n FromPort=int(DWH_PORT),\n ToPort=int(DWH_PORT)\n )\n except Exception as e:\n print(e)\n return\n\n print(\"\\nCluster is ready for access!\")\n endpoint = myClusterProps[\"Endpoint\"][\"Address\"]\n print(f\"Endpoint: {endpoint}\")", "def test_patch_cluster_role(self):\n pass", "def get_lb_secgrp_name ( base_name, app_name ) :\n return get_secgrp_name( base_name, get_lb_secgrp_type( app_name ) )", "def test_secgroup_propagation_local_override(self):\n\n # Needs love - not idempotent\n unit = self.compute_sentry\n if self._get_openstack_release() >= self.trusty_mitaka:\n conf = \"/etc/neutron/plugins/ml2/openvswitch_agent.ini\"\n else:\n conf = \"/etc/neutron/plugins/ml2/ml2_conf.ini\"\n self.d.configure('neutron-api', {'neutron-security-groups': 'True'})\n self.d.configure('neutron-openvswitch',\n {'disable-security-groups': 'True'})\n 
self._wait_and_check()\n ret = u.validate_config_data(unit, conf, 'securitygroup',\n {'enable_security_group': 'False'})\n msg = \"Propagation error, expected %s=%s\" % ('enable_security_group',\n 'False')\n self.process_ret(ret=ret, message=msg)\n self.d.configure('neutron-openvswitch',\n {'disable-security-groups': 'False'})\n self.d.configure('neutron-api', {'neutron-security-groups': 'True'})\n self._wait_and_check()\n ret = u.validate_config_data(unit, conf, 'securitygroup',\n {'enable_security_group': 'True'})", "def test_replace_cluster_role(self):\n pass", "def test_positive_cgroups(self):\n # Test parsing \"cpuset.cpus\" file\n self.assertEqual(self.computer._manager_list[0]._cpu_id_list(), self.cpu_list)\n # This should created per-cpu groups and move all tasks in CPU pool into cpu0\n self.computer.format(alter_network=False, alter_user=False)\n # Test files creation for exclusive CPUs\n for cpu_id in self.cpu_list:\n cpu_n_path = os.path.join(self.cpuset_path, \"cpu\" + str(cpu_id))\n self.assertEqual(str(cpu_id), file_content(os.path.join(cpu_n_path, \"cpuset.cpus\")))\n self.assertEqual(\"1\", file_content(os.path.join(cpu_n_path, \"cpuset.cpu_exclusive\")))\n if cpu_id > 0:\n self.assertEqual(\"\", file_content(os.path.join(cpu_n_path, \"tasks\")))\n\n # Test moving tasks from generic core to private core\n # request PID 1001 to be moved to its private CPU\n request_file_path = os.path.join(self.computer.partition_list[0].path,\n slapos.manager.cpuset.Manager.cpu_exclusive_file)\n file_write(\"1001\\n\", request_file_path)\n # Simulate slapos instance call to perform the actual movement\n self.computer._manager_list[0].instance(\n SlapGridPartitionMock(self.computer.partition_list[0]))\n # Simulate cgroup behaviour - empty tasks in the pool\n file_write(\"\", os.path.join(self.cpuset_path, \"tasks\"))\n # Test that format moved all PIDs from CPU pool into CPU0\n tasks_at_cpu0 = file_content(os.path.join(self.cpuset_path, \"cpu0\", \"tasks\")).split()\n self.assertIn(\"1000\", tasks_at_cpu0)\n # test if the moving suceeded into any provate CPUS (id>0)\n self.assertTrue(any(\"1001\" in file_content(exclusive_task)\n for exclusive_task in glob.glob(os.path.join(self.cpuset_path, \"cpu[1-9]\", \"tasks\"))))\n self.assertIn(\"1002\", tasks_at_cpu0)\n # slapformat should remove successfully moved PIDs from the .slapos-cpu-exclusive file\n self.assertEqual(\"\", file_content(request_file_path).strip())", "def capacitygroup_group():", "def do_remove_security_group(cs, args):\n opts = {}\n opts['id'] = args.container\n opts['security_group'] = args.security_group\n opts = zun_utils.remove_null_parms(**opts)\n try:\n cs.containers.remove_security_group(**opts)\n print(\"Request to remove security group for container %s \"\n \"has been accepted.\" % args.container)\n except Exception as e:\n print(\"Remove security group for container %(container)s \"\n \"failed: %(e)s\" % {'container': args.container, 'e': e})", "def auth_secgroupeg(self, args):\n region = args[\"Region\"]\n sgid = args[\"Security-group-ID\"]\n protocol = args[\"protocol\"]\n from_port = int(args[\"FromPort\"])\n to_port = int(args[\"ToPort\"])\n ip_range = args[\"IpRange\"]\n message = MessageClass()\n\n # Boto3 client creation by providing the access_id and access_secret\n ec2 = boto3.client(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n 
config=None)\n\n data = ec2.authorize_security_group_egress(\n GroupId=sgid,\n IpPermissions=[\n {'IpProtocol': protocol,\n 'FromPort': from_port,\n 'ToPort': to_port,\n 'IpRanges': [{'CidrIp': ip_range}]}\n ])\n attachment = MessageAttachmentsClass()\n attachment.title = data\n message.message_text = \"Egress Successfully Set :\"\n message.attach(attachment)\n\n return message.to_json()", "def create_groups(self, role):\n security_group_names = self._get_all_group_names()\n\n cluster_group_name = self.get_cluster_group_name()\n if not cluster_group_name in security_group_names:\n self.ec2Connection.create_security_group(cluster_group_name, \"Hadoop cluster (%s)\" % (self.name))\n self.ec2Connection.authorize_security_group(cluster_group_name, cluster_group_name)\n # Allow SSH from anywhere\n self.ec2Connection.authorize_security_group(cluster_group_name, ip_protocol=\"tcp\", from_port=22, to_port=22, cidr_ip=\"0.0.0.0/0\")\n\n role_group_name = self.group_name_for_role(role)\n if not role_group_name in security_group_names:\n self.ec2Connection.create_security_group(role_group_name, \"Hadoop %s (%s)\" % (role, self.name))", "def _set_app_security_group(self, security_group):\n pass", "def createSG(ec2,name,rules):\n\t# check if the security group exists\n\tgroup = None\n\tsgGroups = [sg for sg in ec2.get_all_security_groups() if sg.name == name]\n\tif sgGroups:\n\t\tgroup = sgGroups[0]\n\t\tec2.delete_security_group(name=name, group_id=group)\t\n\tprint \"Creating %s Security Group\" % name\n\tgroup = ec2.create_security_group(name, 'group for %s' % name)\n\tif group:\n\t\t# Set the inbound rules\n\t\tfor rule in rules:\n\t\t\tif rule.src_group_name:\n\t\t\t\tgroup.authorize(ip_protocol=rule.ip_protocol,from_port=rule.from_port,to_port=rule.to_port,cidr_ip=rule.cidr_ip,src_group=group)\n\t\t\telse:\n\t\t\t\tgroup.authorize(ip_protocol=rule.ip_protocol,from_port=rule.from_port,to_port=rule.to_port,cidr_ip=rule.cidr_ip,src_group=None)\n\t\treturn True\n\telse:\n\t\tlogError('Error during '+name+' Security Group update')\n\t\treturn False", "def test_replace_cluster_resource_quota(self):\n pass", "def authorize_cluster_access(IpAddress='0.0.0.0/0'):\n\n ec2_client = boto3.client('ec2')\n\n # Redshift uses port 5439 by default. 
If Redshift was configured to use\n # a different port, specify the FromPort= and ToPort= arguments accordingly.\n try:\n ec2_client.authorize_security_group_ingress(GroupName='default',\n IpProtocol='tcp',\n FromPort=5439,\n ToPort=5439,\n CidrIp=IpAddress)\n except ClientError as e:\n print(f'ERROR: {e}')\n return False\n return True", "def remove_rds_security_group(payload):\n rds_ids = payload.pop(\"resource_id\")\n sg_ids = payload.pop(\"sg_id\")\n action = payload.get(\"action\")\n version = payload.get(\"version\")\n result_data = {}\n\n code = 0\n msg = 'Success'\n\n succ, resp = get_ha_rds_backend_instance_info(payload)\n if not succ:\n return resp\n rds_2_instance = {rds: instance for instance, rds in resp.items()}\n\n for rds_id in rds_ids:\n sg_results_succ = []\n for sg_id in sg_ids:\n sg = RdsSecurityGroupModel.get_security_by_id(sg_id=sg_id)\n sg_uuid = sg.uuid\n visible_rds_record = RdsModel.get_rds_by_id(rds_id=rds_id)\n if visible_rds_record.rds_type == 'ha':\n rds_group = visible_rds_record.rds_group\n rds_records = RdsModel.get_rds_records_by_group(rds_group)\n else:\n rds_records = []\n for rds_record in rds_records:\n rds_ins_uuid = rds_2_instance.get(rds_record.uuid)\n payload.update({\"server\": rds_ins_uuid})\n payload.update({\"security_group\": sg_uuid})\n payload.update({\"version\": version})\n payload.update({\"action\": action})\n # resp = api.get(payload=payload, timeout=10)\n resp = api.get(payload=payload)\n\n if resp.get(\"code\") != 0:\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = resp.get(\"msg\")\n # sg_results.update({sg_id: \"failed\"})\n logger.error(\n \"security_group with sg_id \" + sg_id +\n \" cannot remove from rds with rds_id %s\" + rds_id)\n else:\n try:\n rds_record.sg = None\n rds_record.save()\n except Exception as exp:\n logger.error(\"save removal of security group to db fail,\"\n \"{}\".format(exp.message))\n else:\n sg_results_succ.append(sg_id)\n result_data.update({rds_id: sg_results_succ})\n resp = console_response(code, msg, len(result_data.keys()), [result_data])\n return resp", "def tag_instance_security_group(self, tags):\n self._request({\"instance-security-group-tags\": dict(tags)})", "def security_group(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"security_group\")", "def dvs_instances_one_group(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(2)\n self.show_step(3)\n openstack.create_instances(os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n vm_count=vm_count,\n security_groups=[security_group.name])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(4)\n srv_list = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, srv_list)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair)\n\n 
self.show_step(5)\n for srv in srv_list:\n os_conn.nova.servers.delete(srv)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(srv)", "def app_security_group_management(self) -> pulumi.Output[Optional['DomainAppSecurityGroupManagement']]:\n return pulumi.get(self, \"app_security_group_management\")", "def sg_rule_sets_by_rds(rds, ec2, account, region, output_bucket):\n \"\"\"generate list of rds instances\"\"\"\n rds_list = rds.describe_db_instances().get('DBInstances')\n\n \"\"\"generate list of security groups to get rule set details\"\"\"\n sg_list = ec2.describe_security_groups().get('SecurityGroups')\n\n for sg_obj in sg_list:\n \"\"\"find out how many rdss are using a security group\"\"\"\n for rds_obj in rds_list:\n for rdssg in rds_obj.get('VpcSecurityGroups'):\n \"\"\"check if security group is associated to rds instance\"\"\"\n if sg_obj.get('GroupId') == rdssg.get('VpcSecurityGroupId'):\n \n \"\"\"move on to rule entries\"\"\"\n for rule in sg_obj.get('IpPermissions'):\n \"\"\"cidr as source\"\"\"\n for cidr in rule.get('IpRanges'):\n if cidr.get('CidrIp'):\n output_bucket.append(misc.format_line((\n misc.check_if(account.get('name')),\n misc.check_if(region.get('RegionName')),\n misc.check_if(rds_obj.get('DBSubnetGroup').get('VpcId')),\n misc.check_if(rds_obj.get('DBInstanceIdentifier')),\n misc.check_if(str(rds_obj.get('PubliclyAccessible'))),\n misc.check_if(rds_obj.get('Endpoint').get('Address')),\n misc.check_if(str(rds_obj.get('Endpoint').get('Port'))),\n misc.check_if(sg_obj.get('GroupId')),\n misc.check_if(sg_obj.get('GroupName')),\n misc.check_if(str(cidr.get('CidrIp'))),\n misc.check_if(str(check_port(rule.get('FromPort')))),\n misc.check_if(str(check_port(rule.get('ToPort')))),\n misc.check_if(str(check_proto(rule.get('IpProtocol'))))\n )))\n\n \"\"\"security groups as source\"\"\"\n for group in rule.get('UserIdGroupPairs'):\n if group.get('GroupId'):\n output_bucket.append(misc.format_line((\n misc.check_if(account.get('name')),\n misc.check_if(region.get('RegionName')),\n misc.check_if(rds_obj.get('DBSubnetGroup').get('VpcId')),\n misc.check_if(rds_obj.get('DBInstanceIdentifier')),\n misc.check_if(str(rds_obj.get('PubliclyAccessible'))),\n misc.check_if(rds_obj.get('Endpoint').get('Address')),\n misc.check_if(str(rds_obj.get('Endpoint').get('Port'))),\n misc.check_if(sg_obj.get('GroupId')),\n misc.check_if(sg_obj.get('GroupName')),\n misc.check_if(group.get('GroupId')),\n misc.check_if(str(check_port(rule.get('FromPort')))),\n misc.check_if(str(check_port(rule.get('ToPort')))),\n misc.check_if(str(check_proto(rule.get('IpProtocol'))))\n )))", "def security_group_update(secgroup=None, auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(keep_name=True, **kwargs)\n return cloud.update_security_group(secgroup, **kwargs)", "def find_group ( ec2_conn, base_name, group_type ) :\n secgrp = None\n secgrps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ get_secgrp_name( base_name, group_type ) ] } )\n for s in secgrps :\n secgrp = s\n break\n\n return secgrp", "def get_lb_secgrp_type ( app_name ) :\n return app_name.upper( ) + '-LB'", "def ensure_security_groups_created(vpc, environment):\n conglomerate_name = environment + '-conglomerate'\n load_balancer_name = environment + '-load-balancer'\n\n existing = vpc.security_groups.filter(Filters=[\n { 'Name': 'group-name', 'Values': [ conglomerate_name, load_balancer_name ] }\n ])\n ret = {}\n for security_group in existing:\n if 
security_group.group_name == conglomerate_name:\n ret['conglomerate'] = security_group\n elif security_group.group_name == load_balancer_name:\n ret['load-balancer'] = security_group\n else:\n raise Exception(\"Unexpected security group name: \" + security_group.group_name)\n\n if not ret['conglomerate']:\n # untested\n ret['conglomerate'] = vpc.create_security_group(\n GroupName=conglomerate_name,\n Description=conglomerate_name\n )\n if not ret['load-balancer']:\n # untested\n ret['load-balancer'] = vpc.create_security_group(\n GroupName=load_balancer_name,\n Description=load_balancer_name\n )\n\n try:\n ret['conglomerate'].authorize_ingress(IpPermissions=[\n { 'IpProtocol': 'icmp', 'FromPort': 0, 'ToPort': 255, 'IpRanges': [ { 'CidrIp': '0.0.0.0/0' } ] },\n { 'IpProtocol': 'tcp', 'FromPort': 9000, 'ToPort': 9000, 'UserIdGroupPairs': [ { 'GroupId': ret['load-balancer'].id } ] },\n ])\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] != 'InvalidPermission.Duplicate':\n raise e\n\n try:\n ret['load-balancer'].authorize_ingress(IpPermissions=[\n { 'IpProtocol': 'tcp', 'FromPort': 80, 'ToPort': 80 },\n { 'IpProtocol': 'tcp', 'FromPort': 443, 'ToPort': 443 },\n { 'IpProtocol': 'icmp', 'FromPort': 0, 'ToPort': 255, 'IpRanges': [ { 'CidrIp': '0.0.0.0/0' } ] },\n { 'IpProtocol': 'tcp', 'FromPort': 1024, 'ToPort': 65535, 'IpRanges': [ { 'CidrIp': Constants['VpcCidr'] } ] },\n ])\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] != 'InvalidPermission.Duplicate':\n raise e\n\n return ret", "def reserve_group(self, name, sco):\n\t\tif self.client is None:\n\t\t\traise UsageError(\"Not connected!\")\n\t\treturn self.client.reserve_group(name, sco)", "def security_groups(self):\n return int(self.get('security_group_rules'))", "def app_security_group_management(self) -> Optional[pulumi.Input['DomainAppSecurityGroupManagement']]:\n return pulumi.get(self, \"app_security_group_management\")", "def update_instance_security_group(self, instance_id,\n new_security_group_ids):\n ports = port_list(self.request, device_id=instance_id)\n for p in ports:\n params = {'security_groups': new_security_group_ids}\n port_update(self.request, p.id, **params)", "def prevent_forking_outside_group(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"prevent_forking_outside_group\")", "def _Delete(self):\n cmd = self.cmd_prefix + [\n 'redshift', 'delete-cluster-subnet-group',\n '--cluster-subnet-group-name', self.name\n ]\n vm_util.IssueCommand(cmd, raise_on_failure=False)", "def get_sg():\n client = boto3.client('ec2')\n all_instances = client.describe_instances()\n all_sg = client.describe_security_groups()\n\n instance_sg_set = set()\n sg_set = set()\n\n for reservation in all_instances[\"Reservations\"]:\n for instance in reservation[\"Instances\"]:\n for sg in instance[\"SecurityGroups\"]:\n instance_sg_set.add(sg[\"GroupName\"])\n\n for security_group in all_sg[\"SecurityGroups\"]:\n sg_set.add(security_group[\"GroupName\"])\n\n idle_sg = sg_set - instance_sg_set\n\n return idle_sg", "def dvs_remote_sg_simple(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n # Create security group with rules for ssh and ping\n security_group = 
os_conn.create_sec_group_for_ssh()\n\n self.show_step(2)\n net_1 = os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network are created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n self.show_step(3)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n self.show_step(4)\n _sg_rules = os_conn.neutron.list_security_group_rules()\n sg_rules = [sg_rule for sg_rule in _sg_rules['security_group_rules']\n if sg_rule['security_group_id'] in [sg1.id, sg2.id]]\n for rule in sg_rules:\n os_conn.neutron.delete_security_group_rule(rule['id'])\n\n self.show_step(5)\n self.show_step(6)\n for sg in [sg1, sg2]:\n for rule in [self.icmp, self.tcp]:\n rule[\"security_group_rule\"][\"security_group_id\"] = sg.id\n rule[\"security_group_rule\"][\"remote_group_id\"] = sg.id\n rule[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(rule)\n rule[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(rule)\n\n # Create access_point to instances from SG1 and SG2\n _, access_point_ip = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n security_groups=[security_group.name, sg1.name, sg2.name])\n\n self.show_step(7)\n istances_sg1 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[sg1.name])\n\n self.show_step(8)\n istances_sg2 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[sg2.name])\n openstack.verify_instance_state(os_conn)\n\n # Get private ips of instances\n ips = {\n 'SG1': [os_conn.get_nova_instance_ip(i, net_name=net_1['name'])\n for i in istances_sg1],\n 'SG2': [os_conn.get_nova_instance_ip(i, net_name=net_1['name'])\n for i in istances_sg2]\n }\n\n self.show_step(9)\n self.show_step(10)\n for group in ips:\n ip_pair = dict.fromkeys(ips[group])\n for key in ip_pair:\n ip_pair[key] = [value for value in ips[group] if key != value]\n openstack.check_connection_through_host(\n access_point_ip, ip_pair, timeout=60 * 5)\n\n self.show_step(11)\n ip_pair = dict.fromkeys(ips['SG1'])\n for key in ip_pair:\n ip_pair[key] = ips['SG2']\n openstack.check_connection_through_host(\n access_point_ip, ip_pair, result_of_command=1, timeout=60 * 5)", "def security_group_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"security_group_id\")", "def security_group_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"security_group_id\")", "def AddSecurityGroupEntry(self, security_group, host=None, port=None):\n if self._conn:\n security_groups = self._conn.get_all_security_groups(groupnames=security_group)\n for sg in security_groups:\n if sg.name == security_group:\n return self._conn.authorize_security_group(sg.name, ip_protocol='tcp', from_port=port, to_port=port, cidr_ip='%s/32' % host)", "def _generate_ec2_instance_and_sg(resource):\n for instance in resource.instances.all():\n for 
security_group in instance.security_groups:\n yield instance, security_group", "def privilegedStartService(self):\n super(GroupOwnedUNIXServer, self).privilegedStartService()\n\n # Unfortunately, there's no public way to access this. -glyph\n fileName = self._port.port\n chown(fileName, getuid(), self.gid)", "def test_replace_cluster_policy(self):\n pass", "def _get_sg_name(sg_name, session):\n return session.resource(\"ec2\").SecurityGroup(sg_name).group_name", "def save_security_group(resp, payload):\n if resp.get(\"code\") != 0:\n return None, SaveDataError(\"Create security group failed\")\n uuid = resp[\"data\"][\"ret_set\"][0][\"id\"]\n name = payload.get(\"description\")\n sg_id = payload.get(\"sg_id\")\n zone_name = payload.get(\"zone\")\n user_name = payload.get(\"owner\")\n zone = ZoneModel.get_zone_by_name(zone_name)\n user = User.objects.get(username=user_name)\n _security_group_ins, err = RdsSecurityGroupModel.objects.create(uuid,\n sg_id,\n name,\n zone,\n user)\n return _security_group_ins, err", "def test_break_security_group_usual_case():", "def create_sg(vpc_id, description, group_name):\n client = boto3.client('ec2')\n security_group = str(group_name + \"_sg\")\n\n # get the security groups\n idle_sg = get_sg()\n\n print(idle_sg)\n print(security_group)\n\n # if security group doesnt exist, create it\n if security_group not in idle_sg:\n print(\"Creating SG\")\n return client.create_security_group(\n Description=description,\n GroupName=security_group,\n VpcId=vpc_id\n )\n return get_sg_id(security_group)", "def create(self, body: CloudSecurityGroup) -> Dict:\n\t\treturn self._post(route=AWSSecurityGroupConsts.CLOUD_SECURITY_GROUP.value, body=body)", "def test_break_security_group_failed():", "def sp_cpu_exclusive_on(cpuset_info):\n if cpuset_info['storpool.slice']['cpuset.cpu_exclusive'] != 1:\n return ['cpuset:storpool.slice cpu_exclusive is not 1'], []\n return [], []", "def authorize(self, cidr_ip=None, ec2_group=None):\r\n if isinstance(ec2_group, SecurityGroup):\r\n group_name = ec2_group.name\r\n group_owner_id = ec2_group.owner_id\r\n else:\r\n group_name = None\r\n group_owner_id = None\r\n return self.connection.authorize_dbsecurity_group(self.name,\r\n cidr_ip,\r\n group_name,\r\n group_owner_id)", "def _test_multiple_ports_secgroup_inheritance(self):\n # create a security group and make it loginable and pingable\n secgrp = self._create_security_group('secgrp')\n self.create_loginable_secgroup_rule(\n secgroup_id=secgrp['id'])\n self.create_pingable_secgroup_rule(\n secgroup_id=secgrp['id'])\n if self.stateless_sg:\n self.create_ingress_metadata_secgroup_rule(\n secgroup_id=secgrp['id'])\n # create two ports with fixed IPs and the security group created\n ports = []\n for i in range(2):\n ports.append(self.create_port(\n self.network, fixed_ips=[{'subnet_id': self.subnets[0]['id']}],\n security_groups=[secgrp['id']]))\n # spawn instances with the ports created\n server_ssh_clients, fips, servers = self.create_vm_testing_sec_grp(\n ports=ports)\n # verify ICMP reachability and ssh connectivity\n for fip in fips:\n self.ping_ip_address(fip['floating_ip_address'])\n self.check_connectivity(fip['floating_ip_address'],\n CONF.validation.image_ssh_user,\n self.keypair['private_key'])", "def prod_load_balancer_sg_valid(self) -> None:\n if self.prod_env:\n sg_name = 'saints-xctf-prod-server-elb-security-group'\n else:\n sg_name = 'saints-xctf-dev-server-elb-security-group'\n\n response = self.ec2.describe_security_groups(Filters=[\n {\n 'Name': 'group-name',\n 
'Values': [sg_name]\n }\n ])\n\n security_group = response.get('SecurityGroups')[0]\n\n self.assertTrue(all([\n security_group.get('GroupName') == sg_name,\n self.validate_load_balancer_sg_rules(\n security_group.get('IpPermissions'),\n security_group.get('IpPermissionsEgress')\n )\n ]))", "def create_security_group(group_name):\n ec2 = boto.ec2.connect_to_region(AWS_REGION)\n for g in ec2.get_all_security_groups():\n if g.name == group_name:\n return # We already have this group setup\n group = ec2.create_security_group(group_name,\n \"%s SSH access group\" % group_name)\n group.authorize(\"tcp\", 22, 22, \"0.0.0.0/0\") # SSH is on port 22, all IPs\n group.authorize(\"tcp\", 80, 80, \"0.0.0.0/0\")\n group.authorize(\"tcp\", 61000, 65000, \"0.0.0.0/0\")\n print \"Created new security group\"", "def update_cluster_merge_across_nodes(request):\n ksm_merge_across_nodes = getattr(\n request.node.cls, \"ksm_merge_across_nodes\"\n )\n\n def fin():\n \"\"\"\n 1) Disable KSM\n \"\"\"\n ll_clusters.updateCluster(\n positive=True, cluster=sla_conf.CLUSTER_NAME[0], ksm_enabled=False\n )\n request.addfinalizer(fin)\n\n assert ll_clusters.updateCluster(\n positive=True,\n cluster=sla_conf.CLUSTER_NAME[0],\n ksm_enabled=True,\n ksm_merge_across_nodes=ksm_merge_across_nodes\n )", "def test_placement_group(ray_start_2_cpus):\n num_workers = 2\n bundle = {\"CPU\": 1}\n bundles = [bundle.copy() for _ in range(num_workers)]\n placement_group = ray.util.placement_group(bundles)\n wg = WorkerGroup(num_workers=num_workers, placement_group=placement_group)\n wg.remove_workers([0])\n wg.add_workers(1)", "def test_aws_service_api_vm_security_group_delete(self):\n pass", "def security_groups(self, oid):\n try:\n path = u'/servers/%s/os-security-groups' % oid\n res = self.client.call(path, u'GET', data=u'', \n token=self.manager.identity.token)\n self.logger.debug(u'Get openstack server security groups: %s' % truncate(res))\n return res[0][u'security_groups']\n except Exception as error:\n self.logger.error(error, exc_info=True)\n data = []\n return res", "def grant_ssh_access ( ec2_conn, tgt_grps, nat_grp ) :\n for grp in tgt_grps :\n grant_grp_access( ec2_conn, [ nat_grp ], grp, 22 )", "def prevent_forking_outside_group(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"prevent_forking_outside_group\")", "def prevent_forking_outside_group(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"prevent_forking_outside_group\")", "def security_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"security_group_id\")", "def security_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"security_group_id\")", "def test_replace_cluster_role_binding(self):\n pass", "def _remove_server_from_sgroup(**kwargs):\n # A10 Lightning APIs\n ServerGrpApi = \"applications/{0}/hosts/{1}/services/{2}/servergroups/{3}\".format(\n kwargs['applicationId'], kwargs['hostId'],\n kwargs['serviceId'], kwargs['serverGroupId'])\n \n ServerGrpPolicyApi = \"applications/{0}/hosts/{1}/services/{2}/servergroups/{3}/policies\".format(\n kwargs['applicationId'], kwargs['hostId'],\n kwargs['serviceId'], kwargs['serverGroupId'])\n \n ServerGrpImportApi = \"applications/{0}/hosts/{1}/services/{2}/servergroups/_import\".format(\n kwargs['applicationId'], kwargs['hostId'], kwargs['serviceId'])\n\n # Build the requests\n request1 = urllib2.Request(\n os.environ[\"API_BASE_URL\"] + ServerGrpApi)\n request2 = urllib2.Request(\n os.environ[\"API_BASE_URL\"] + ServerGrpPolicyApi)\n request3 = 
urllib2.Request(\n os.environ[\"API_BASE_URL\"] + ServerGrpImportApi)\n\n # Auth header\n cred = A10User + ':' + A10UserPassword\n bas64 = b64encode(bytes(cred))\n auth = \"Basic \" + bas64.decode(\"ascii\")\n \n # Complete header dict\n headers = {\n \"provider\": \"root\",\n \"tenant\": A10Tenant,\n \"Content-Type\": \"application/json\",\n \"Authorization\": auth\n }\n\n # Attach all the headers to the requests\n for key, value in headers.items():\n request1.add_header(key, value)\n request2.add_header(key, value)\n request3.add_header(key, value)\n\n # First retrieve the server group data\n response = urllib2.urlopen(request1)\n server_grp_data = json.loads(response.read().decode(\"utf-8\"))\n servers = server_grp_data['servers']\n \n # Remove the required server\n for serv in servers:\n if serv['ipAddress'] == _get_public_ip_addr(ServerInstanceID):\n servers.remove(serv)\n\n # Get server group policies\n response = urllib2.urlopen(request2)\n srv_policies = json.loads(response.read().decode(\"utf-8\"))\n \n # Add parsed server data and server group policies and post it\n server_grp_data['servers'] = servers\n server_grp_data['policies'] = srv_policies\n urllib2.urlopen(request3, json.dumps(server_grp_data).encode(\"utf-8\"))", "def sgup(sg=\"sg_external_ssh\"):\n ip = os.popen(\"/usr/bin/curl ifconfig.co 2>/dev/null\").readline().strip()\n print(\"My Public IP is : \"+ip)\n client = boto3.client(\"ec2\")\n ippermissions = client.describe_security_groups(GroupNames = [ sg ])[\"SecurityGroups\"][0][\"IpPermissions\"]\n print(\"Revoking old IP from group \"+sg)\n client.revoke_security_group_ingress(GroupName = sg, IpPermissions = ippermissions)\n printr(\"Adding new IP to group \"+sg)\n client.authorize_security_group_ingress(GroupName=sg, IpProtocol=\"-1\", FromPort=0, ToPort=0, CidrIp=ip+\"/32\")", "def engine_security_group_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"engine_security_group_id\")", "def allow_sgcmd(params, sg, protocol_str, cidr, egress, force):\n ec2 = get_ec2_connection()\n sg_obj = convert_sg_name_to_sg(sg)\n protocol, target_port_low, target_port_high = parse_protocol_port_string(protocol_str)\n if is_debugging:\n print(\"Protocol %s\" % protocol)\n print(\"Target port %s to %s\" % (target_port_low, target_port_high))\n print(\"Source ID %s\" % cidr)\n cidr = expand_cidr_string(cidr)\n if egress:\n sg_obj.authorize_egress(\n IpProtocol=protocol,\n FromPort=target_port_low,\n ToPort=target_port_high,\n CidrIp=cidr\n )\n else:\n sg_obj.authorize_ingress(\n IpProtocol=protocol,\n FromPort=target_port_low,\n ToPort=target_port_high,\n CidrIp=cidr\n )", "def _split_ns_by_scatter(cls,\n shard_count,\n namespace,\n raw_entity_kind,\n app):\n if shard_count == 1:\n\n return [key_range.KeyRange(namespace=namespace, _app=app)]\n\n ds_query = datastore.Query(kind=raw_entity_kind,\n namespace=namespace,\n _app=app,\n keys_only=True)\n ds_query.Order(\"__scatter__\")\n oversampling_factor = 32\n random_keys = ds_query.Get(shard_count * oversampling_factor)\n\n if not random_keys:\n\n\n return ([key_range.KeyRange(namespace=namespace, _app=app)] +\n [None] * (shard_count - 1))\n\n random_keys.sort()\n\n if len(random_keys) >= shard_count:\n\n random_keys = cls._choose_split_points(random_keys, shard_count)\n\n k_ranges = []\n\n k_ranges.append(key_range.KeyRange(\n key_start=None,\n key_end=random_keys[0],\n direction=key_range.KeyRange.ASC,\n include_start=False,\n include_end=False,\n namespace=namespace,\n _app=app))\n\n for i in range(0, len(random_keys) - 
1):\n k_ranges.append(key_range.KeyRange(\n key_start=random_keys[i],\n key_end=random_keys[i+1],\n direction=key_range.KeyRange.ASC,\n include_start=True,\n include_end=False,\n namespace=namespace,\n _app=app))\n\n k_ranges.append(key_range.KeyRange(\n key_start=random_keys[-1],\n key_end=None,\n direction=key_range.KeyRange.ASC,\n include_start=True,\n include_end=False,\n namespace=namespace,\n _app=app))\n\n if len(k_ranges) < shard_count:\n\n k_ranges += [None] * (shard_count - len(k_ranges))\n return k_ranges", "def load_security_groups(self):\n url = self.lookup(\"security_groups_url\")\n groups = self._fetcher.get_entities(url)\n if groups is None:\n return\n\n group_names = [group['name']\n for group in groups if group['running_default'] is False]\n # at this point the group_names contain all the running groups in addition\n # to the groups assigned to this space.\n # That's why we need to remove the duplicates\n group_names = list(set(group_names))\n\n for name in group_names:\n self._security_groups.append({'name': name})", "def add_secgroup(self, name=None, description=None):\n # print (\"UUUU\")\n if self.cloudman:\n if description is None:\n description = name\n try:\n self.cloudman.network.create_security_group(\n name=name,\n description=description)\n except:\n Console.warning(f\"secgroup {name} already exists in cloud. \"\n f\"skipping.\")\n else:\n raise ValueError(\"cloud not initialized\")", "def list_secgroups(self, name=None):\n groups = self.cloudman.network.security_groups()\n\n # print (\"TTTTT\")\n # for g in groups:\n # pprint(g)\n\n if name is not None:\n for entry in groups:\n\n if entry['name'] == name:\n groups = [entry]\n break\n\n return self.get_list(\n groups,\n kind=\"secgroup\")", "def dvs_instances_batch_mix_sg(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n self.show_step(2)\n net_1 = os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network is created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n # Get max count of instances which we can create according to\n # resource limit\n vm_count = min(\n [os_conn.nova.hypervisors.resource_class.to_dict(h)['vcpus']\n for h in os_conn.nova.hypervisors.list()])\n\n self.show_step(3)\n self.show_step(4)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n self.icmp[\"security_group_rule\"][\"security_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"remote_group_id\"] = sg1.id\n self.icmp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n self.icmp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.icmp)\n\n for sg 
in [sg1, sg2]:\n self.tcp[\"security_group_rule\"][\"security_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"remote_group_id\"] = sg.id\n self.tcp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n self.tcp[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n\n # Add rules for ssh and ping\n os_conn.goodbye_security()\n _s_groups = os_conn.neutron.list_security_groups()['security_groups']\n _srv_tenant_id = os_conn.get_tenant(SERVTEST_TENANT).id\n default_sg = [sg for sg in _s_groups\n if sg['tenant_id'] == _srv_tenant_id and\n sg['name'] == 'default'][0]\n\n for step in (5, 9):\n self.show_step(step) # step 5, 9\n step += 1\n self.show_step(step) # step 6, 10\n step += 1\n\n openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=vm_count,\n security_groups=[sg1.name, sg2.name, default_sg['name']])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(step) # step 7, 11\n step += 1\n\n instances = os_conn.nova.servers.list()\n fip = openstack.create_and_assign_floating_ips(os_conn, instances)\n ip_pair = dict.fromkeys(fip)\n for key in ip_pair:\n ip_pair[key] = [value for value in fip if key != value]\n openstack.check_connection_vms(ip_pair,\n timeout=60 * 5,\n interval=10)\n\n self.show_step(step) # step 8, 12\n step += 1\n\n for instance in instances:\n os_conn.nova.servers.delete(instance)\n logger.info(\"Check that instance was deleted.\")\n os_conn.verify_srv_deleted(instance)", "def is_sgw_ce_enabled(cluster_config):\n\n cluster = load_cluster_config_json(cluster_config)\n try:\n return cluster[\"environment\"][\"sg_ce\"]\n except KeyError:\n return False", "def test_patch_cluster_resource_quota(self):\n pass", "def test_intra_sg_isolation(self):\n # create a security group and make it loginable\n secgrp = self._create_security_group('secgrp')\n\n # remove all rules and add ICMP, DHCP and metadata as egress,\n # and ssh as ingress.\n for sgr in secgrp['security_group_rules']:\n self.client.delete_security_group_rule(sgr['id'])\n\n self.create_loginable_secgroup_rule(secgroup_id=secgrp['id'])\n rule_list = [{'direction': constants.EGRESS_DIRECTION,\n 'protocol': constants.PROTO_NAME_TCP,\n 'remote_ip_prefix': '169.254.169.254/32',\n 'description': 'metadata out',\n },\n {'direction': constants.EGRESS_DIRECTION,\n 'protocol': constants.PROTO_NAME_UDP,\n 'port_range_min': '67',\n 'port_range_max': '67',\n 'description': 'dhcpv4 out',\n },\n {'direction': constants.EGRESS_DIRECTION,\n 'protocol': constants.PROTO_NAME_ICMP,\n 'description': 'ping out',\n },\n ]\n self.create_secgroup_rules(rule_list, secgroup_id=secgrp['id'])\n\n # go vms, go!\n ssh_clients, fips, servers = self.create_vm_testing_sec_grp(\n num_servers=2,\n security_groups=[{'name': secgrp['name']}])\n\n # verify SSH functionality. This will ensure that servers were\n # able to reach dhcp + metadata servers\n for fip in fips:\n self.check_connectivity(fip['floating_ip_address'],\n CONF.validation.image_ssh_user,\n self.keypair['private_key'])\n\n # try to ping instances without intra SG permission (should fail)\n self.check_remote_connectivity(\n ssh_clients[0], fips[1]['fixed_ip_address'],\n should_succeed=False)\n self.check_remote_connectivity(\n ssh_clients[1], fips[0]['fixed_ip_address'],\n should_succeed=False)\n\n # add intra sg rule. 
This will allow packets from servers that\n # are in the same sg\n rule_list = [{'direction': constants.INGRESS_DIRECTION,\n 'remote_group_id': secgrp['id']}]\n self.create_secgroup_rules(rule_list, secgroup_id=secgrp['id'])\n\n # try to ping instances with intra SG permission\n self.check_remote_connectivity(\n ssh_clients[0], fips[1]['fixed_ip_address'])\n self.check_remote_connectivity(\n ssh_clients[1], fips[0]['fixed_ip_address'])", "def engine_security_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"engine_security_group_id\")", "def test_patch_cluster_policy(self):\n pass", "def security_group_id_for_domain_boundary(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"security_group_id_for_domain_boundary\")", "def test_read_namespaced_applied_cluster_resource_quota(self):\n pass" ]
[ "0.6038478", "0.5949765", "0.5760521", "0.57362705", "0.56504565", "0.55500543", "0.5461943", "0.5421799", "0.54184514", "0.54021674", "0.5399247", "0.5391312", "0.53637403", "0.53541446", "0.5313645", "0.5262325", "0.5229808", "0.5227352", "0.5222983", "0.52090144", "0.51645213", "0.5156898", "0.5144462", "0.5139043", "0.5112529", "0.51054627", "0.5088506", "0.5086639", "0.50759476", "0.5059506", "0.5055603", "0.50483954", "0.5031053", "0.50303584", "0.50282776", "0.50231355", "0.50219303", "0.50201464", "0.49933556", "0.49889466", "0.49763265", "0.49709225", "0.4965626", "0.49438238", "0.49321052", "0.49239254", "0.49232367", "0.49133566", "0.48981646", "0.4896572", "0.48893136", "0.48820457", "0.48764816", "0.48635265", "0.4861763", "0.4857638", "0.4848043", "0.48446634", "0.48446634", "0.48336956", "0.48322532", "0.48256296", "0.4818407", "0.4813093", "0.4809709", "0.48063126", "0.48060122", "0.4804232", "0.47931102", "0.47930574", "0.47906664", "0.47900212", "0.4785427", "0.47841725", "0.4770986", "0.47692195", "0.4769016", "0.47670683", "0.47666472", "0.47656783", "0.47656783", "0.47557762", "0.47557762", "0.47549793", "0.474458", "0.474099", "0.4740735", "0.4735618", "0.47232208", "0.4722722", "0.47196797", "0.47187135", "0.4718074", "0.47133362", "0.47099268", "0.46878302", "0.46873716", "0.4685415", "0.46851218", "0.46749285" ]
0.46838224
99
> For a sharded cluster instance, the bound ECS security group takes effect only for mongos nodes.
async def modify_security_group_configuration_async(
    self,
    request: dds_20151201_models.ModifySecurityGroupConfigurationRequest,
) -> dds_20151201_models.ModifySecurityGroupConfigurationResponse:
    runtime = util_models.RuntimeOptions()
    return await self.modify_security_group_configuration_with_options_async(request, runtime)
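A minimal usage sketch for the async variant above. The import paths, client construction, endpoint value, and the request field names (modeled on the API's documented DBInstanceId and SecurityGroupId parameters) are assumptions based on the usual layout of this generated SDK, not taken from this row:

import asyncio

from alibabacloud_dds20151201.client import Client            # assumed module path
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_tea_openapi import models as open_api_models


async def main():
    # Build an SDK client; credentials and endpoint are placeholders.
    config = open_api_models.Config(
        access_key_id='<access-key-id>',
        access_key_secret='<access-key-secret>',
        endpoint='mongodb.aliyuncs.com',
    )
    client = Client(config)

    # Field names are assumed from the API's DBInstanceId / SecurityGroupId parameters.
    request = dds_20151201_models.ModifySecurityGroupConfigurationRequest(
        dbinstance_id='<instance-id>',
        security_group_id='<ecs-security-group-id>',
    )
    # Note from the query above: for a sharded cluster instance, the bound
    # ECS security group takes effect only for the mongos nodes.
    response = await client.modify_security_group_configuration_async(request)
    print(response.body)


if __name__ == '__main__':
    asyncio.run(main())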
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def modify_rds_security_group(payload):\n # version = payload.get(\"version\")\n rds_ids = payload.pop(\"resource_id\")\n sg_ids = payload.pop(\"sg_id\")\n apply_action = \"GrantSecurityGroup\"\n remove_action = \"RemoveSecurityGroup\"\n check_instance_security_action = \"DescribeSecurityGroupByInstance\"\n version = payload.get(\"version\")\n result_data = {}\n\n succ, resp = get_ha_rds_backend_instance_info(payload)\n if not succ:\n return resp\n rds_2_instance = {rds: instance for instance, rds in resp.items()}\n\n if len(sg_ids) > 1:\n return console_response(\n SecurityErrorCode.ONE_SECURITY_PER_INSTANCE_ERROR, \"modify failed\")\n sg_id = sg_ids[0]\n\n code = 0\n msg = 'Success'\n for rds_id in rds_ids:\n sg_results_succ = []\n sg = RdsSecurityGroupModel.get_security_by_id(sg_id=sg_id)\n sg_uuid = sg.uuid\n visible_rds_record = RdsModel.get_rds_by_id(rds_id=rds_id)\n if visible_rds_record.rds_type == 'ha':\n rds_group = visible_rds_record.rds_group\n rds_records = RdsModel.get_rds_records_by_group(rds_group)\n else:\n rds_records = []\n for rds_record in rds_records:\n rds_ins_uuid = rds_2_instance.get(rds_record.uuid)\n\n payload.update(\n {\"action\": check_instance_security_action, \"version\": version,\n \"server\": rds_ins_uuid})\n # check_resp = api.get(payload=payload, timeout=10)\n check_resp = api.get(payload=payload)\n if check_resp.get(\"code\") != 0:\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = check_resp.get(\"msg\")\n continue\n\n # if the instance already has a security group, remove it\n if check_resp[\"data\"][\"total_count\"] > 0:\n old_sg_uuid = check_resp[\"data\"][\"ret_set\"][0][\"id\"]\n payload.update({\"action\": remove_action, \"version\": version,\n \"server\": rds_ins_uuid,\n \"security_group\": old_sg_uuid})\n # remove_resp = api.get(payload=payload, timeout=10)\n remove_resp = api.get(payload=payload)\n if remove_resp.get(\"code\") != 0:\n logger.debug(\"the resp of removing the old securty group is:\" +\n str(remove_resp))\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = remove_resp.get(\"msg\")\n continue\n else:\n rds_record.sg = None\n rds_record.save()\n\n # grant the new security group to the instance\n payload.update({\"action\": apply_action, \"version\": version,\n \"server\": rds_ins_uuid, \"security_group\": sg_uuid})\n # grant_resp = api.get(payload=payload, timeout=10)\n grant_resp = api.get(payload=payload)\n\n if grant_resp.get(\"code\") != 0:\n logger.debug(\"the resp of granting the new securty group is:\" +\n str(grant_resp))\n code = CommonErrorCode.REQUEST_API_ERROR\n msg = grant_resp.get(\"msg\")\n logger.error(\n \"security_group with sg_id \" + sg_id +\n \" cannot apply to rds with rds_id \" + rds_id)\n else:\n try:\n rds_record.sg = RdsSecurityGroupModel.\\\n get_security_by_id(sg_id)\n rds_record.save()\n except Exception as exp:\n logger.error(\"cannot save grant sg to rds to db, {}\".\n format(exp.message))\n else:\n sg_results_succ.append(sg_id)\n result_data.update({rds_id: sg_results_succ})\n resp = console_response(code, msg, len(result_data.keys()), [result_data])\n return resp", "def update_cluster_security_group(ec2, cluster_props):\n vpc = ec2.Vpc(id=cluster_props['VpcId'])\n\n # The first Security group should be the default one\n defaultSg = list(vpc.security_groups.all())[0]\n print(\"Default Security group:\", defaultSg)\n\n # Authorize access\n try:\n defaultSg.authorize_ingress(GroupName=defaultSg.group_name,\n CidrIp='0.0.0.0/0',\n IpProtocol='TCP',\n FromPort=int(DWH_PORT),\n ToPort=int(DWH_PORT)\n 
)\n print(\"Access authorized\")\n except botocore.exceptions.ClientError as e:\n print(\"ClientError:\", e)\n except Exception as e:\n print(\"Error:\", e)", "def _set_security_group(client, instance_id_list, security_groups):\n logging.info('Setting the security group of instances.')\n for instance_id in instance_id_list:\n client.modify_instance_attribute(InstanceId=instance_id, Groups=security_groups)", "def grant_grp_self_access ( ec2_conn, grp, start_port, end_port, protocol = 'tcp' ) :\n if not does_grp_rule_exist( grp, grp, start_port, end_port, protocol ) :\n grp.authorize( ip_protocol = protocol,\n from_port = start_port,\n to_port = end_port,\n src_group = grp )\n ec2_conn.authorize_security_group_egress( group_id = grp.id,\n ip_protocol = protocol,\n from_port = start_port,\n to_port = end_port,\n src_group_id = grp.id )", "def _test_overlapping_sec_grp_rules(self):\n initial_security_groups = []\n if self.stateless_sg:\n md_secgrp = self._create_security_group('metadata_secgrp')\n self.create_ingress_metadata_secgroup_rule(\n secgroup_id=md_secgrp['id'])\n initial_security_groups.append(\n {'name': md_secgrp['name']})\n client_ssh, _, vms = self.create_vm_testing_sec_grp(\n num_servers=2, security_groups=initial_security_groups)\n tmp_ssh, _, tmp_vm = self.create_vm_testing_sec_grp(\n num_servers=1, security_groups=initial_security_groups)\n srv_ssh = tmp_ssh[0]\n srv_vm = tmp_vm[0]\n srv_port = self.client.list_ports(network_id=self.network['id'],\n device_id=srv_vm['server']['id'])['ports'][0]\n srv_ip = srv_port['fixed_ips'][0]['ip_address']\n secgrps = []\n for i, vm in enumerate(vms):\n sg = self._create_security_group('secgrp-%d' % i)\n self.create_loginable_secgroup_rule(secgroup_id=sg['id'])\n port = self.client.list_ports(network_id=self.network['id'],\n device_id=vm['server']['id'])['ports'][0]\n self.client.update_port(port['id'], security_groups=[sg['id']])\n secgrps.append(sg)\n tcp_port = 3000\n rule_list = [{'protocol': constants.PROTO_NUM_TCP,\n 'direction': constants.INGRESS_DIRECTION,\n 'port_range_min': tcp_port,\n 'port_range_max': tcp_port,\n 'remote_group_id': secgrps[0]['id']},\n {'protocol': constants.PROTO_NUM_TCP,\n 'direction': constants.INGRESS_DIRECTION,\n 'port_range_min': tcp_port,\n 'port_range_max': tcp_port + 2,\n 'remote_group_id': secgrps[1]['id']}]\n self.client.update_port(srv_port['id'],\n security_groups=[secgrps[0]['id'], secgrps[1]['id']])\n self.create_secgroup_rules(rule_list, secgroup_id=secgrps[0]['id'])\n\n if self.stateless_sg:\n # NOTE(slaweq): in case of stateless SG, client needs to have also\n # rule which will explicitly accept ingress TCP connections which\n # will be replies from the TCP server so it will use random\n # destination port (depends on the src port choosen by client while\n # establishing connection)\n self.create_security_group_rule(\n security_group_id=secgrps[0]['id'],\n protocol=constants.PROTO_NAME_TCP,\n direction=constants.INGRESS_DIRECTION)\n self.create_security_group_rule(\n security_group_id=secgrps[1]['id'],\n protocol=constants.PROTO_NAME_TCP,\n direction=constants.INGRESS_DIRECTION)\n\n # The conntrack entries are ruled by the OF definitions but conntrack\n # status can change the datapath. 
Let's check the rules in two\n # attempts\n for _ in range(2):\n with utils.StatefulConnection(\n client_ssh[0], srv_ssh, srv_ip, tcp_port) as con:\n con.test_connection()\n for port in range(tcp_port, tcp_port + 3):\n with utils.StatefulConnection(\n client_ssh[1], srv_ssh, srv_ip, port) as con:\n con.test_connection()", "def request_access_to_groups(self, ceph):\n for ceph_group in (\"volumes\", \"images\", \"vms\"):\n ceph.request_access_to_group(\n name=ceph_group,\n object_prefix_permissions={\"class-read\": [\"rbd_children\"]},\n permission=\"rwx\",\n )", "def test_aws_service_api_vm_security_group_put(self):\n pass", "def dvs_port_security_group(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n self.show_step(2)\n instances = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n security_groups=[security_group.name])\n openstack.verify_instance_state(os_conn)\n\n self.show_step(3)\n access_point, access_point_ip = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n security_groups=[security_group.name])\n\n ips = [os_conn.get_nova_instance_ip(i, net_name=self.inter_net_name)\n for i in instances]\n ip_pair = dict.fromkeys([access_point_ip])\n for key in ip_pair:\n ip_pair[key] = ips\n openstack.check_connection_vms(ip_pair)\n\n self.show_step(4)\n ips = []\n for instance in instances:\n port = os_conn.neutron.create_port({\n \"port\": {\n \"network_id\": default_net.id,\n \"device_id\": instance.id\n }})['port']\n ips.append(port['fixed_ips'][0]['ip_address'])\n\n self.show_step(5)\n for key in ip_pair:\n ip_pair[key] = ips\n openstack.check_connection_vms(ip_pair, result_of_command=1)", "def check_security_group(self):\n return True", "def grant_grp_access ( ec2_conn, incoming_grps, tgt_grp, port, protocol = 'tcp' ) :\n for grp in incoming_grps :\n if not does_grp_rule_exist( tgt_grp, grp, port, port, protocol ) :\n tgt_grp.authorize( ip_protocol = protocol,\n from_port = port,\n to_port = port,\n src_group = grp )\n ec2_conn.authorize_security_group_egress( group_id = grp.id,\n ip_protocol = protocol,\n from_port = port,\n to_port = port,\n src_group_id = tgt_grp.id )", "def test_aws_service_api_security_groups_get(self):\n pass", "def process_security_group ( ec2_conn, vpc, base_name, params, secgrp_type = None, secgrp_description = None ) :\n if not secgrp_type :\n secgrp_type = params[ 'type' ]\n if not secgrp_description :\n secgrp_description = params[ 'description' ]\n\n secgrp = find_group( ec2_conn, base_name, secgrp_type )\n if not secgrp :\n if params.get( 'create', 'NO' ) == 'YES' :\n secgrp_name = get_secgrp_name( base_name, secgrp_type )\n print \"Creating security group with name: \" + secgrp_name\n secgrp = create_secgrp( ec2_conn, vpc, secgrp_name, secgrp_description )\n else :\n print \"ERROR: Could not find group with name \" + get_secgrp_name( base_name, secgrp_type )\n sys.exit( 1 )\n\n print \"Prepping rules for security group \" + secgrp.name\n remove_all_rules( ec2_conn, [ secgrp ], True, base_name )\n\n # Reload group to retrieve new object with no rules.\n 
secgrp = find_group( ec2_conn, base_name, secgrp_type )\n\n is_public = params.get( 'public' ) == 'YES'\n if is_public :\n nat_secgrp = None\n if params.get( 'os-update' ) == 'YES' :\n ec2_conn.authorize_security_group_egress( group_id = secgrp.id,\n ip_protocol = \"tcp\",\n from_port = 80,\n to_port = 80,\n cidr_ip = all_ip_cidr )\n if params.get( 'public-tcp-ports' ) :\n public_ports = params[ 'public-tcp-ports' ]\n for port in public_ports :\n secgrp.authorize( ip_protocol = \"tcp\",\n from_port = port,\n to_port = port,\n cidr_ip = all_ip_cidr )\n\n if params.get( 'incoming-cidr-rules' ) :\n for incoming_rule in params[ 'incoming-cidr-rules' ] :\n start_port = incoming_rule.get( 'port' )\n end_port = start_port\n\n protocol = get_secgrp_protocol_param( incoming_rule )\n cidr_list = get_cidr_param( incoming_rule[ 'cidr' ] )\n\n secgrp.authorize( ip_protocol = protocol,\n from_port = start_port,\n to_port = end_port,\n cidr_ip = cidr_list )\n\n else :\n nat_secgrp = find_group( ec2_conn, base_name, 'NAT' )\n # Grant NAT access to login to the machine\n if not nat_secgrp :\n print \"ERROR: Could not find NAT security group!\"\n sys.exit( 1 )\n grant_ssh_access( ec2_conn, [ secgrp ], nat_secgrp )\n if params.get( 'os-update' ) == 'YES' :\n grant_cidr_access( ec2_conn, all_ip_cidr, [ secgrp ], 80, nat_secgrp )\n # Need to reload secgrp so it contains latest rules\n secgrp = find_group( ec2_conn, base_name, secgrp_type )\n # Need to reload NAT secgrp so it contains latest rules\n nat_secgrp = find_group( ec2_conn, base_name, 'NAT' )\n\n if params.get( 'outgoing-cidr-rules' ) :\n for outgoing_rule in params[ 'outgoing-cidr-rules' ] :\n start_port = outgoing_rule.get( 'port' )\n end_port = start_port\n protocol = get_secgrp_protocol_param( outgoing_rule )\n cidr_list = get_cidr_param( outgoing_rule[ 'cidr' ] )\n\n for cidr in cidr_list :\n grant_cidr_access( ec2_conn, cidr, [ secgrp ], start_port, nat_secgrp, protocol )\n\n if params.get( 'outgoing-group-rules' ) :\n for outgoing_rule in params[ 'outgoing-group-rules' ] :\n start_port = outgoing_rule.get( 'port' )\n end_port = start_port\n protocol = get_secgrp_protocol_param( outgoing_rule )\n target_secgrp_type = outgoing_rule[ 'group-type' ]\n target_secgrp = find_group( ec2_conn, base_name, target_secgrp_type )\n grant_grp_access( ec2_conn, [ secgrp ], target_secgrp, start_port, protocol )\n \n if params.get( 'incoming-group-rules' ) :\n for incoming_rule in params[ 'incoming-group-rules' ] :\n start_port = incoming_rule.get( 'port' )\n end_port = start_port\n protocol = get_secgrp_protocol_param( incoming_rule )\n incoming_secgrp_type = incoming_rule[ 'group-type' ]\n incoming_secgrp = find_group( ec2_conn, base_name, incoming_secgrp_type )\n grant_grp_access( ec2_conn, [ incoming_secgrp ], secgrp, start_port, protocol )\n\n if params.get( 'self-rules' ) :\n for self_rule in params[ 'self-rules' ] :\n start_port = self_rule.get( 'port' )\n end_port = start_port\n\n if not start_port :\n start_port = self_rule[ 'port-range' ][ 'start' ]\n end_port = self_rule[ 'port-range' ][ 'end' ]\n\n protocol = get_secgrp_protocol_param( self_rule )\n\n grant_grp_self_access( ec2_conn, secgrp, start_port, end_port, protocol )\n\n # Reload the security group so it contains all the latest rules\n secgrp = find_group( ec2_conn, base_name, secgrp_type )\n return ( secgrp_type, secgrp )", "def auth_secgroup(self, args):\n region = args[\"Region\"]\n sgid = args[\"Security-group-ID\"]\n protocol = args[\"protocol\"]\n from_port = int(args[\"FromPort\"])\n 
to_port = int(args[\"ToPort\"])\n ip_range = args[\"IpRange\"]\n message = MessageClass()\n\n # Boto3 client creation by providing the access_id and access_secret\n ec2 = boto3.client(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n\n data = ec2.authorize_security_group_ingress(\n GroupId=sgid,\n IpPermissions=[\n {'IpProtocol': protocol,\n 'FromPort': from_port,\n 'ToPort': to_port,\n 'IpRanges': [{'CidrIp': ip_range}]}\n ])\n attachment = MessageAttachmentsClass()\n attachment.title = data\n message.message_text = \"Ingress Successfully Set :\"\n message.attach(attachment)\n\n return message.to_json()", "def is_secured_cluster(self, services):\n return services and \"cluster-env\" in services[\"configurations\"] and\\\n \"security_enabled\" in services[\"configurations\"][\"cluster-env\"][\"properties\"] and\\\n services[\"configurations\"][\"cluster-env\"][\"properties\"][\"security_enabled\"].lower() == \"true\"", "def test_break_security_group_usual_case_specify_sg():", "def find_secgrp ( ec2_conn, secgrp_name ) :\n sec_grps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ secgrp_name ] } )\n if len( sec_grps ) > 0 :\n return sec_grps[ 0 ]\n \n return None", "def handle_region(self, region, args):\n result = [CHECKMARK, str(region), \"created security group '{}'\".format(GROUP_NAME)]\n\n try:\n # Create the security group\n response = region.conn.create_security_group(\n Description='Security group for Alia replicas and clients.',\n GroupName=GROUP_NAME,\n )\n\n # Get the newly created group id\n group_id = response[\"GroupId\"]\n\n # Allow all network traffic from within the security group\n response = region.conn.authorize_security_group_ingress(\n GroupId = group_id,\n IpPermissions = [\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 0, \"ToPort\": 65535,\n \"UserIdGroupPairs\": [\n {\n \"GroupId\": group_id,\n \"Description\": \"allow all traffic from the same group\",\n }\n ]\n }\n ]\n )\n\n # Open Alia-specific ports for access\n reponse = region.conn.authorize_security_group_ingress(\n GroupId = group_id,\n IpPermissions = [\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 22, \"ToPort\": 22,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"allow remote SSH access\"\n }\n ]\n },\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 3264, \"ToPort\": 3285,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"external Alia service access\",\n }\n ],\n \"Ipv6Ranges\": [\n {\n \"CidrIpv6\": \"::/0\",\n \"Description\": \"external Alia service IPv6 access\"\n }\n ]\n },\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 5356, \"ToPort\": 5356,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"research services access\"\n }\n ]\n },\n {\n \"IpProtocol\": \"tcp\", \"FromPort\": 4157, \"ToPort\": 4157,\n \"IpRanges\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"Description\": \"master services access\",\n }\n ]\n },\n ]\n )\n\n\n except Exception as e:\n result[0] = CROSSMARK\n result[2] = str(e)\n\n\n return result", "def install_mongo ( vpc_conn, ec2_conn, cloudwatch_conn, vpc, base_name, aws_account_type, base_topicarn, params ):\n app_type = 'MONGO'\n mongo_keypair = get_keypair_name( aws_account_type, vpc.region.name, app_type )\n print mongo_keypair\n mongo_sec_grp_name = get_secgrp_name( base_name, app_type )\n \n print \"mongo_sec_grp_name\" + 
mongo_sec_grp_name\n \n mongo_sec_grp = find_secgrp(ec2_conn, mongo_sec_grp_name)\n print mongo_sec_grp\n \n if not mongo_sec_grp :\n mongo_sec_grp = create_secgrp( ec2_conn,\n vpc,\n mongo_sec_grp_name,\n 'Controls access to the ' + app_type )\n print mongo_sec_grp \n \n remove_all_rules( ec2_conn, [ mongo_sec_grp ],deep = True,base_name = base_name)\n grant_ssh_access( ec2_conn, [ mongo_sec_grp ], find_group( ec2_conn, base_name, 'NAT' ) )\n grant_grp_self_access ( ec2_conn, mongo_sec_grp, 0, 65535, protocol = 'tcp' )\n \n mongo_ami_name = base_name + '-' + app_type\n \n mongo_ami = get_ami_by_name( ec2_conn, mongo_ami_name )\n if not mongo_ami :\n mongo_ami = get_ami_by_name( ec2_conn, default_mongo_ami )\n\n print \"Creating mongoDB Instances\"\n \n mongo_subnets = get_vpc_subnets( vpc_conn, vpc, 'STATIC' )\n mongo_sec_grps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ mongo_sec_grp_name ] } )\n \n primary_ip = get_primary_node(mongo_subnets)\n mongo_instances = []\n mongo_config = get_mongo_config(mongo_subnets)\n \n user_data = None\n primary_instance_id = None\n primary_subnet = None\n for subnet in mongo_subnets :\n cidr_block = subnet.cidr_block\n \n if cidr_block == primary_ip :\n primary_subnet = subnet\n \n # First launch the secondary instances\n if cidr_block != primary_ip :\n instance_private_ip = get_static_ip(subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo )\n\n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )\n\n user_data = base64.b64encode(mongo_config)\n \n print \"Launching primary mongo instance\"\n print \"primary_subnet \" +primary_subnet.cidr_block\n instance_private_ip = get_static_ip(primary_subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = primary_subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo)\n \n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )", "def do_add_security_group(cs, args):\n opts = {}\n opts['id'] = args.container\n opts['security_group'] = args.security_group\n opts = zun_utils.remove_null_parms(**opts)\n try:\n cs.containers.add_security_group(**opts)\n print(\"Request to add security group for container %s \"\n \"has been accepted.\" % args.container)\n except Exception as e:\n print(\"Add security group for container %(container)s \"\n \"failed: %(e)s\" % {'container': args.container, 'e': e})", "def create_sec_group(ec2, sec_group_name):\n sec = ec2.create_security_group(sec_group_name, 'Jvivian Boto SecGroup')\n port = 22\n sec.authorize('tcp', port, port, '0.0.0.0/0')", "def list_secgroups(self, name=None):", "def 
change_instance_security_groups(instance_id, security_group_ids):\n\n # Retrieve the IDs of the network interfaces attached to the EC2 instance\n ec2_client = boto3.client('ec2')\n try:\n response = ec2_client.describe_instances(InstanceIds=[instance_id])\n except ClientError as e:\n logging.error(e)\n return False\n instance_info = response['Reservations'][0]['Instances'][0]\n\n # Assign the security groups to each network interface\n for network_interface in instance_info['NetworkInterfaces']:\n try:\n ec2_client.modify_network_interface_attribute(\n NetworkInterfaceId=network_interface['NetworkInterfaceId'],\n Groups=security_group_ids)\n except ClientError as e:\n logging.error(e)\n return False\n return True", "def rule_40_can_create_sg(session):\n\n def try_create(session, side):\n res, conn_vpc = session[\"config\"][side][\"res\"], session[\"conn\"][side](\"vpc\")\n subnet = conn_vpc.get_all_subnets([res[\"subnet_id\"]])[0]\n\n try:\n conn_vpc.create_security_group(\n \"foo\", \"bar\", vpc_id = subnet.vpc_id, dry_run = True)\n except EC2ResponseError as e:\n if 412 != e.status:\n raise e\n\n try_create(session, \"server\")\n try_create(session, \"client\")\n\n return True", "def test_modify_storage_group_srdf_set_consistency_enable(self):\n if not self.run_consistency_enable_check():\n self.skipTest(\n 'Skip test_modify_storage_group_srdf_set_consistency_enable '\n 'This fix is in V9.2.1.7')\n sg_name, srdf_group_number, local_volume, remote_volume = (\n self.create_rdf_sg())\n self.replication.modify_storage_group_srdf(\n storage_group_id=sg_name, action='setmode',\n srdf_group_number=srdf_group_number,\n options={'setMode': {'mode': 'Asynchronous'}})\n status = self.replication.modify_storage_group_srdf(\n storage_group_id=sg_name, srdf_group_number=srdf_group_number,\n action=\"EnableConsistency\")\n self.assertEqual('Enabled', status.get('consistency_protection'))\n disable_status = self.replication.modify_storage_group_srdf(\n storage_group_id=sg_name, srdf_group_number=srdf_group_number,\n action=\"DisableConsistency\")\n self.assertEqual(\n 'Disabled', disable_status.get('consistency_protection'))", "def cluster(self):\n assert False", "def enableRemoteAccess():\n myClusterProps = redshift.describe_clusters(\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER\n )['Clusters'][0]\n\n # attempt to allow incoming TCP connections to the cluster\n print(\"Enabling incoming TCP connections...\")\n try:\n vpc = ec2.Vpc(id=myClusterProps[\"VpcId\"])\n for sg in vpc.security_groups.all():\n if sg.group_name == \"default\":\n defaultSg = sg\n break\n\n defaultSg.authorize_ingress(\n GroupName=defaultSg.group_name,\n CidrIp=CIDR_IP_ALLOWED,\n IpProtocol='TCP',\n FromPort=int(DWH_PORT),\n ToPort=int(DWH_PORT)\n )\n except Exception as e:\n print(e)\n return\n\n print(\"\\nCluster is ready for access!\")\n endpoint = myClusterProps[\"Endpoint\"][\"Address\"]\n print(f\"Endpoint: {endpoint}\")", "def get_lb_secgrp_name ( base_name, app_name ) :\n return get_secgrp_name( base_name, get_lb_secgrp_type( app_name ) )", "def test_patch_cluster_role(self):\n pass", "def test_secgroup_propagation_local_override(self):\n\n # Needs love - not idempotent\n unit = self.compute_sentry\n if self._get_openstack_release() >= self.trusty_mitaka:\n conf = \"/etc/neutron/plugins/ml2/openvswitch_agent.ini\"\n else:\n conf = \"/etc/neutron/plugins/ml2/ml2_conf.ini\"\n self.d.configure('neutron-api', {'neutron-security-groups': 'True'})\n self.d.configure('neutron-openvswitch',\n {'disable-security-groups': 'True'})\n 
[ "0.60393614", "0.59497327", "0.57599705", "0.573656", "0.5651098", "0.5548846", "0.5461813", "0.54219145", "0.5417648", "0.5402027", "0.5399483", "0.5392111", "0.53635854", "0.5351217", "0.53140545", "0.5263554", "0.5227952", "0.52270705", "0.52252376", "0.5210519", "0.5165917", "0.51567715", "0.5145789", "0.5138254", "0.5111076", "0.51034313", "0.5087529", "0.5086714", "0.5075627", "0.50576687", "0.50575453", "0.5049828", "0.5033157", "0.5031087", "0.5029837", "0.50240165", "0.5021425", "0.50197774", "0.49932504", "0.49900985", "0.49766633", "0.4971953", "0.4965839", "0.49432492", "0.49330613", "0.49241033", "0.49236128", "0.4913157", "0.48997554", "0.48979387", "0.4889481", "0.48814127", "0.48759946", "0.486464", "0.48638123", "0.48603457", "0.4849742", "0.48465526", "0.48465526", "0.48359016", "0.48339802", "0.4824042", "0.48161334", "0.4815542", "0.48117423", "0.4808991", "0.48057356", "0.48057148", "0.4793328", "0.4793129", "0.47902936", "0.4789782", "0.4786362", "0.47860664", "0.4771275", "0.47711098", "0.47690088", "0.47683498", "0.47668663", "0.4766751", "0.4766751", "0.4757104", "0.4757104", "0.4752596", "0.47456518", "0.47420132", "0.47417107", "0.47364396", "0.47242218", "0.47230354", "0.47209692", "0.47201923", "0.47191194", "0.47134635", "0.47094736", "0.4688195", "0.46878752", "0.46859014", "0.46845007", "0.46832672", "0.4674398" ]
0.0
-1