_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q18100
CloudDNSManager.list_subdomains_next_page
train
def list_subdomains_next_page(self):
    """
    Return the next page of subdomain results, using the same limit as
    the previous request.

    Raises NoMoreResults when no further pages are available.
    """
    next_uri = self._paging.get("subdomain", {}).get("next_uri")
    if next_uri is None:
        raise exc.NoMoreResults("There are no more pages of subdomains "
                "to list.")
    return self._list_subdomains(next_uri)
python
{ "resource": "" }
q18101
CloudDNSManager.list_records_previous_page
train
def list_records_previous_page(self):
    """
    Return the previous page of record results, using the same limit as
    the previous request.

    Raises NoMoreResults when there is no earlier page.
    """
    prev_uri = self._paging.get("record", {}).get("prev_uri")
    if prev_uri is None:
        raise exc.NoMoreResults("There are no previous pages of records "
                "to list.")
    return self._list_records(prev_uri)
python
{ "resource": "" }
q18102
CloudDNSManager.list_records_next_page
train
def list_records_next_page(self):
    """
    Return the next page of record results, using the same limit as the
    previous request.

    Raises NoMoreResults when no further pages are available.
    """
    next_uri = self._paging.get("record", {}).get("next_uri")
    if next_uri is None:
        raise exc.NoMoreResults("There are no more pages of records to list.")
    return self._list_records(next_uri)
python
{ "resource": "" }
q18103
CloudDNSManager.get_record
train
def get_record(self, domain, record):
    """
    Return the full details for a single existing record belonging to
    the given domain, as a CloudDNSRecord.
    """
    dom_id = utils.get_id(domain)
    uri = "/domains/%s/records/%s" % (dom_id, utils.get_id(record))
    resp, resp_body = self._retry_get(uri)
    # Records don't carry their domain in the response; stamp it on.
    resp_body["domain_id"] = dom_id
    return CloudDNSRecord(self, resp_body, loaded=False)
python
{ "resource": "" }
q18104
CloudDNSManager.update_records
train
def update_records(self, domain, records):
    """
    Modify several existing records of a domain in one call.

    'records' must be a list of record dicts; raises TypeError
    otherwise, and DomainRecordUpdateFailed on API failure.
    """
    if not isinstance(records, list):
        raise TypeError("Expected records of type list")
    uri = "/domains/%s/records" % utils.get_id(domain)
    _, resp_body = self._async_call(uri, method="PUT",
            body={"records": records},
            error_class=exc.DomainRecordUpdateFailed, has_response=False)
    return resp_body
python
{ "resource": "" }
q18105
CloudDNSManager.delete_record
train
def delete_record(self, domain, record):
    """
    Delete one existing record of a domain; raises
    DomainRecordDeletionFailed on API failure.
    """
    uri = "/domains/%s/records/%s" % (utils.get_id(domain),
            utils.get_id(record))
    _, resp_body = self._async_call(uri, method="DELETE",
            error_class=exc.DomainRecordDeletionFailed, has_response=False)
    return resp_body
python
{ "resource": "" }
q18106
CloudDNSManager._get_ptr_details
train
def _get_ptr_details(self, device, device_type):
    """
    Given a device and its type, return the (href, service_name) pair
    required for PTR record management on that device.
    """
    context = self.api.identity
    region = self.api.region_name
    if device_type.lower().startswith("load"):
        endpoint = pyrax._get_service_endpoint(context, "load_balancer",
                region)
        path_part, svc_name = "loadbalancers", "cloudLoadBalancers"
    else:
        endpoint = pyrax._get_service_endpoint(context, "compute", region)
        path_part, svc_name = "servers", "cloudServersOpenStack"
    href = "%s/%s/%s" % (endpoint, path_part, utils.get_id(device))
    return (href, svc_name)
python
{ "resource": "" }
q18107
CloudDNSManager._resolve_device_type
train
def _resolve_device_type(self, device):
    """
    Classify 'device' as "server" or "loadbalancer"; raise
    InvalidDeviceType for anything else.
    """
    try:
        # When the test suite is importable, its fakes count as devices.
        from tests.unit import fakes
        server_types = (pyrax.CloudServer, fakes.FakeServer)
        lb_types = (CloudLoadBalancer, fakes.FakeLoadBalancer,
                fakes.FakeDNSDevice)
    except ImportError:
        # Not running with tests
        server_types = (pyrax.CloudServer, )
        lb_types = (CloudLoadBalancer, )
    if isinstance(device, server_types):
        return "server"
    if isinstance(device, lb_types):
        return "loadbalancer"
    raise exc.InvalidDeviceType("The device '%s' must be a CloudServer "
            "or a CloudLoadBalancer." % device)
python
{ "resource": "" }
q18108
CloudDNSManager.list_ptr_records
train
def list_ptr_records(self, device):
    """
    Return every PTR record configured for the given device as
    CloudDNSPTRRecord objects; an empty list when none exist.
    """
    device_type = self._resolve_device_type(device)
    href, svc_name = self._get_ptr_details(device, device_type)
    uri = "/rdns/%s?href=%s" % (svc_name, href)
    try:
        resp, resp_body = self._retry_get(uri)
    except exc.NotFound:
        # No PTR records at all for this device.
        return []
    return [CloudDNSPTRRecord(rec, device)
            for rec in resp_body.get("records", [])]
python
{ "resource": "" }
q18109
CloudDNSManager.add_ptr_records
train
def add_ptr_records(self, device, records):
    """
    Adds one or more PTR records to the specified device.

    'records' may be a single record dict or a list/tuple of them.
    Returns the list of raw record dicts from the API response (or
    None when the response carries none).

    Raises PTRRecordCreationFailed on API failure, and
    InvalidPTRRecord when the domain/IP information is rejected.
    """
    device_type = self._resolve_device_type(device)
    href, svc_name = self._get_ptr_details(device, device_type)
    if not isinstance(records, (list, tuple)):
        records = [records]
    body = {"recordsList": {
                "records": records},
            "link": {
                "content": "",
                "href": href,
                "rel": svc_name,
            }}
    uri = "/rdns"
    # This is a necessary hack, so here's why: if you attempt to add
    # PTR records to a device and you don't have rights to either the
    # device or the IP address, the DNS API returns a 401, which the
    # pyrax client interprets as a bad auth token and tries to
    # re-authenticate. That re-auth fails (the DNS API has no regional
    # endpoints), surfacing as EndpointNotFound, which we catch here
    # and convert into a meaningful exception. Once the DNS API
    # returns 403 for this case, this kludge can go away.
    try:
        resp, resp_body = self._async_call(uri, body=body, method="POST",
                error_class=exc.PTRRecordCreationFailed)
    except exc.EndpointNotFound:
        raise exc.InvalidPTRRecord("The domain/IP address information is not "
                "valid for this device.")
    # NOTE(review): the original body contained unreachable code after
    # this return that wrapped each record in CloudDNSPTRRecord (as
    # list_ptr_records does); it has been removed. If wrapped objects
    # were the intended return, change the line below accordingly.
    return resp_body.get("records")
python
{ "resource": "" }
q18110
CloudDNSClient.method_get
train
def method_get(self, uri, **kwargs):
    """
    Wrap the parent class's method_get so that empty-body responses
    from the Cloud DNS API are retried up to three times before
    raising ServiceResponseFailure.
    """
    attempts = 3
    for _ in range(attempts):
        resp, body = super(CloudDNSClient, self).method_get(uri, **kwargs)
        if body:
            return resp, body
    raise exc.ServiceResponseFailure("The Cloud DNS service failed to "
            "respond to the request.")
python
{ "resource": "" }
q18111
CloudDNSClient.list
train
def list(self, limit=None, offset=None):
    """Return all resources, honoring the optional paging arguments."""
    return self._manager.list(limit=limit, offset=offset)
python
{ "resource": "" }
q18112
CloudDNSClient.list_subdomains
train
def list_subdomains(self, domain, limit=None, offset=None):
    """
    Return all subdomains of the given domain, delegating to the
    domain object itself.
    """
    return domain.list_subdomains(limit=limit, offset=offset)
python
{ "resource": "" }
q18113
CloudDNSClient.get_subdomain_iterator
train
def get_subdomain_iterator(self, domain, limit=None, offset=None):
    """
    Return an iterator over every subdomain of 'domain'. When more
    than 100 subdomains exist, further pages are fetched from the API
    until all have been yielded.

    NOTE(review): 'limit' and 'offset' are accepted but not forwarded
    to the iterator — confirm whether they should be.
    """
    return SubdomainResultsIterator(self._manager, domain=domain)
python
{ "resource": "" }
q18114
CloudDNSClient.get_absolute_limits
train
def get_absolute_limits(self):
    """Return the absolute limits for the current account as a dict."""
    resp, body = self.method_get("/limits")
    return body.get("limits", {}).get("absolute")
python
{ "resource": "" }
q18115
CloudDNSClient.get_rate_limits
train
def get_rate_limits(self):
    """
    Return the current rate limits for domain and status requests, as
    a list of {"uri": ..., "limits": ...} dicts.
    """
    resp, body = self.method_get("/limits")
    rate_limits = body.get("limits", {}).get("rate")
    return [{"uri": entry["uri"], "limits": entry["limit"]}
            for entry in rate_limits]
python
{ "resource": "" }
q18116
RaxIdentity._read_credential_file
train
def _read_credential_file(self, cfg): """ Parses the credential file with Rackspace-specific labels. """ self.username = cfg.get("rackspace_cloud", "username") try: self.password = cfg.get("rackspace_cloud", "api_key", raw=True) except ConfigParser.NoOptionError as e: # Allow either the use of either 'api_key' or 'password'. self.password = cfg.get("rackspace_cloud", "password", raw=True)
python
{ "resource": "" }
q18117
RaxIdentity.set_credentials
train
def set_credentials(self, username, password=None, region=None,
        tenant_id=None, authenticate=False):
    """
    Store the username and password directly. Rackspace auth relies on
    the api_key attribute, so any stale key is cleared first.
    """
    self.api_key = None
    super(RaxIdentity, self).set_credentials(username, password=password,
            region=region, tenant_id=tenant_id, authenticate=authenticate)
python
{ "resource": "" }
q18118
RaxIdentity.authenticate
train
def authenticate(self, username=None, password=None, api_key=None,
        tenant_id=None, connect=False):
    """
    Authenticate against the Rackspace identity service. API-key
    credentials work with the default behavior; when the first attempt
    fails, retry using the standard password format.

    The 'connect' parameter is retained for backwards compatibility.
    It no longer has any effect.
    """
    try:
        super(RaxIdentity, self).authenticate(username=username,
                password=password, api_key=api_key, tenant_id=tenant_id)
    except exc.AuthenticationFailed:
        # The account may use password-style creds; switch and retry.
        self._creds_style = "password"
        super(RaxIdentity, self).authenticate(username=username,
                password=password, api_key=api_key, tenant_id=tenant_id)
python
{ "resource": "" }
q18119
RaxIdentity.auth_with_token
train
def auth_with_token(self, token, tenant_id=None, tenant_name=None):
    """
    Use an already-valid token to generate the service catalog.

    Rackspace auth uses one tenant ID for object_store services and a
    different one for everything else. The caller supplies the
    'everything else' tenant, so the object_store tenant ID is pulled
    out of the first response and a second token auth is performed
    with it; the resulting object_store endpoints are merged into the
    main service catalog before the combined response is parsed.
    """
    main_resp, main_body = self._call_token_auth(token, tenant_id,
            tenant_name)
    # Find the swift (object_store) tenant ID, if any.
    roles = main_body["access"]["user"]["roles"]
    ostore = [role for role in roles
            if role["name"] == "object-store:default"]
    if ostore:
        ostore_tenant_id = ostore[0]["tenantId"]
        ostore_resp, ostore_body = self._call_token_auth(token,
                ostore_tenant_id, None)
        main_body["access"]["serviceCatalog"].extend(
                ostore_body["access"]["serviceCatalog"])
    self._parse_response(main_body)
    self.authenticated = True
python
{ "resource": "" }
q18120
RaxIdentity.get_user
train
def get_user(self, user_id=None, username=None, email=None):
    """
    Look up a user by ID, username, or email (exactly one must be
    supplied). An email search may match several accounts and returns
    a list of User objects; ID and username searches return a single
    User. Raises Forbidden when a user_id outside the current account
    is requested, and NotFound when no user matches.
    """
    if user_id:
        uri = "/users/%s" % user_id
    elif username:
        uri = "/users?name=%s" % username
    elif email:
        uri = "/users?email=%s" % email
    else:
        raise ValueError("You must include one of 'user_id', "
                "'username', or 'email' when calling get_user().")
    resp, resp_body = self.method_get(uri)
    if resp.status_code == 404:
        raise exc.NotFound("No such user exists.")
    users = resp_body.get("users", [])
    if users:
        return [User(self, user) for user in users]
    single = resp_body.get("user", {})
    if not single:
        raise exc.NotFound("No such user exists.")
    return User(self, single)
python
{ "resource": "" }
q18121
RaxIdentity.update_user
train
def update_user(self, user, email=None, username=None,
        uid=None, defaultRegion=None, enabled=None):
    """
    Update settings for the given user; only the parameters that are
    not None are sent. Raises AuthorizationFailure when the caller is
    not allowed to update users.
    """
    user_id = utils.get_id(user)
    uri = "users/%s" % user_id
    upd = {"id": user_id}
    if email is not None:
        upd["email"] = email
    if defaultRegion is not None:
        upd["RAX-AUTH:defaultRegion"] = defaultRegion
    if username is not None:
        upd["username"] = username
    if enabled is not None:
        upd["enabled"] = enabled
    resp, resp_body = self.method_put(uri, data={"user": upd})
    if resp.status_code in (401, 403, 404):
        raise exc.AuthorizationFailure("You are not authorized to update "
                "users.")
    return User(self, resp_body)
python
{ "resource": "" }
q18122
RaxIdentity.reset_api_key
train
def reset_api_key(self, user=None):
    """
    Reset and return the API key for 'user' (default: the currently
    authenticated user). Resetting a key invalidates neither existing
    sessions nor tokens.
    """
    target = self if user is None else user
    user_id = utils.get_id(target)
    uri = ("users/%s/OS-KSADM/credentials/" % user_id +
            "RAX-KSKEY:apiKeyCredentials/RAX-AUTH/reset")
    resp, resp_body = self.method_post(uri)
    return resp_body.get("RAX-KSKEY:apiKeyCredentials", {}).get("apiKey")
python
{ "resource": "" }
q18123
ScalingGroup._make_policies
train
def _make_policies(self):
    """
    Build AutoScalePolicy objects from the raw 'scalingPolicies' data
    and store them on self.policies.
    """
    built = []
    for pol_dict in self.scalingPolicies:
        built.append(AutoScalePolicy(self.manager, pol_dict, self))
    self.policies = built
python
{ "resource": "" }
q18124
ScalingGroup.update
train
def update(self, name=None, cooldown=None, min_entities=None,
        max_entities=None, metadata=None):
    """
    Update one or more attributes of this ScalingGroup.

    NOTE: passing 'metadata' *replaces* the existing metadata; to add
    to it, pass the complete dict or use update_metadata() instead.
    """
    return self.manager.update(self, name=name, cooldown=cooldown,
            min_entities=min_entities, max_entities=max_entities,
            metadata=metadata)
python
{ "resource": "" }
q18125
ScalingGroup.add_policy
train
def add_policy(self, name, policy_type, cooldown, change=None,
        is_percent=False, desired_capacity=None, args=None):
    """
    Add a policy with the given values to this scaling group. 'change'
    is an absolute amount unless 'is_percent' is True, in which case
    it is treated as a percentage.
    """
    return self.manager.add_policy(self, name, policy_type, cooldown,
            change=change, is_percent=is_percent,
            desired_capacity=desired_capacity, args=args)
python
{ "resource": "" }
q18126
ScalingGroup.delete_policy
train
def delete_policy(self, policy):
    """Remove the given policy from this scaling group."""
    return self.manager.delete_policy(scaling_group=self, policy=policy)
python
{ "resource": "" }
q18127
ScalingGroupManager.get_state
train
def get_state(self, scaling_group):
    """
    Return the current state of the specified scaling group as a dict
    with active server IDs, capacity figures, and paused flag.
    """
    uri = "/%s/%s/state" % (self.uri_base, utils.get_id(scaling_group))
    resp, resp_body = self.api.method_get(uri)
    data = resp_body["group"]
    return {
        "active": [itm["id"] for itm in data["active"]],
        "active_capacity": data["activeCapacity"],
        "desired_capacity": data["desiredCapacity"],
        "pending_capacity": data["pendingCapacity"],
        "paused": data["paused"],
    }
python
{ "resource": "" }
q18128
ScalingGroupManager.pause
train
def pause(self, scaling_group):
    """
    Suspend execution of all policies for the given scaling group.
    Always returns None.
    """
    uri = "/%s/%s/pause" % (self.uri_base, utils.get_id(scaling_group))
    self.api.method_post(uri)
    return None
python
{ "resource": "" }
q18129
ScalingGroupManager.get_configuration
train
def get_configuration(self, scaling_group):
    """Return the scaling group's configuration as a dict."""
    uri = "/%s/%s/config" % (self.uri_base, utils.get_id(scaling_group))
    resp, resp_body = self.api.method_get(uri)
    return resp_body.get("groupConfiguration")
python
{ "resource": "" }
q18130
ScalingGroupManager.replace
train
def replace(self, scaling_group, name, cooldown, min_entities,
        max_entities, metadata=None):
    """
    Replace an existing ScalingGroup configuration wholesale. Every
    attribute must be supplied; pass None to clear an optional one.
    """
    body = self._create_group_config_body(name, cooldown, min_entities,
            max_entities, metadata=metadata)
    uri = "/%s/%s/config" % (self.uri_base, utils.get_id(scaling_group))
    self.api.method_put(uri, body=body)
python
{ "resource": "" }
q18131
ScalingGroupManager.update_metadata
train
def update_metadata(self, scaling_group, metadata):
    """
    Merge the given metadata dict into the scaling group's existing
    metadata (unlike update(), which replaces it).
    """
    if not isinstance(scaling_group, ScalingGroup):
        scaling_group = self.get(scaling_group)
    merged = scaling_group.metadata
    merged.update(metadata)
    return self.update(scaling_group, metadata=merged)
python
{ "resource": "" }
q18132
ScalingGroupManager.get_launch_config
train
def get_launch_config(self, scaling_group):
    """
    Return the launch configuration for the scaling group, with
    API-style keys normalized to pyrax-style names.
    """
    # Map API key names onto the names pyrax exposes.
    key_map = {
        "OS-DCF:diskConfig": "disk_config",
        "flavorRef": "flavor",
        "imageRef": "image",
    }
    uri = "/%s/%s/launch" % (self.uri_base, utils.get_id(scaling_group))
    resp, resp_body = self.api.method_get(uri)
    data = resp_body.get("launchConfiguration")
    args = data.get("args", {})
    ret = {
        "type": data.get("type"),
        "load_balancers": args.get("loadBalancers"),
    }
    for key, value in args.get("server", {}).items():
        ret[key_map.get(key, key)] = value
    return ret
python
{ "resource": "" }
q18133
ScalingGroupManager.update_launch_metadata
train
def update_launch_metadata(self, scaling_group, metadata):
    """
    Merge the given metadata dict into the existing metadata of the
    scaling group's launch configuration.
    """
    if not isinstance(scaling_group, ScalingGroup):
        scaling_group = self.get(scaling_group)
    merged = scaling_group.launchConfiguration.get("args", {}).get(
            "server", {}).get("metadata", {})
    merged.update(metadata)
    return self.update_launch_config(scaling_group, metadata=merged)
python
{ "resource": "" }
q18134
ScalingGroupManager.list_policies
train
def list_policies(self, scaling_group):
    """
    Return every policy defined for the given scaling group as
    AutoScalePolicy objects.
    """
    uri = "/%s/%s/policies" % (self.uri_base, utils.get_id(scaling_group))
    resp, resp_body = self.api.method_get(uri)
    return [AutoScalePolicy(self, data, scaling_group)
            for data in resp_body.get("policies", [])]
python
{ "resource": "" }
q18135
ScalingGroupManager.replace_policy
train
def replace_policy(self, scaling_group, policy, name, policy_type,
        cooldown, change=None, is_percent=False, desired_capacity=None,
        args=None):
    """
    Replace an existing policy wholesale. Every attribute must be
    supplied; pass None to clear an optional one.
    """
    uri = "/%s/%s/policies/%s" % (self.uri_base,
            utils.get_id(scaling_group), utils.get_id(policy))
    body = self._create_policy_body(name=name, policy_type=policy_type,
            cooldown=cooldown, change=change, is_percent=is_percent,
            desired_capacity=desired_capacity, args=args)
    self.api.method_put(uri, body=body)
python
{ "resource": "" }
q18136
ScalingGroupManager.list_webhooks
train
def list_webhooks(self, scaling_group, policy):
    """
    Return every webhook defined for the given policy as
    AutoScaleWebhook objects.
    """
    uri = "/%s/%s/policies/%s/webhooks" % (self.uri_base,
            utils.get_id(scaling_group), utils.get_id(policy))
    resp, resp_body = self.api.method_get(uri)
    return [AutoScaleWebhook(self, data, policy, scaling_group)
            for data in resp_body.get("webhooks", [])]
python
{ "resource": "" }
q18137
ScalingGroupManager._resolve_lbs
train
def _resolve_lbs(load_balancers): """ Takes either a single LB reference or a list of references and returns the dictionary required for creating a Scaling Group. References can be either a dict that matches the structure required by the autoscale API, a CloudLoadBalancer instance, or the ID of the load balancer. """ lb_args = [] if not isinstance(load_balancers, list): lbs = [load_balancers] else: lbs = load_balancers for lb in lbs: if isinstance(lb, dict): lb_args.append(lb) elif isinstance(lb, CloudLoadBalancer): lb_args.append({ "loadBalancerId": lb.id, "port": lb.port, }) elif isinstance(lb, tuple): lb_args.append({"loadBalancerId": lb[0], "port": lb[1]}) else: # See if it's an ID for a Load Balancer try: instance = pyrax.cloud_loadbalancers.get(lb) except Exception: raise exc.InvalidLoadBalancer("Received an invalid " "specification for a Load Balancer: '%s'" % lb) lb_args.append({ "loadBalancerId": instance.id, "port": instance.port, }) return lb_args
python
{ "resource": "" }
q18138
ScalingGroupManager._encode_personality
train
def _encode_personality(self, personality): """ Personality files must be base64-encoded before transmitting. """ if personality is None: personality = [] else: personality = utils.coerce_to_list(personality) for pfile in personality: if "contents" in pfile: pfile["contents"] = base64.b64encode(pfile["contents"]) return personality
python
{ "resource": "" }
q18139
AutoScalePolicy.add_webhook
train
def add_webhook(self, name, metadata=None):
    """Create a webhook on this policy and return it."""
    return self.manager.add_webhook(self.scaling_group, self, name,
            metadata=metadata)
python
{ "resource": "" }
q18140
AutoScalePolicy.delete_webhook
train
def delete_webhook(self, webhook):
    """Remove the given webhook from this policy."""
    return self.manager.delete_webhook(self.scaling_group, self, webhook)
python
{ "resource": "" }
q18141
AutoScaleWebhook.update
train
def update(self, name=None, metadata=None):
    """
    Update this webhook via its policy; either or both parameters may
    be given.
    """
    return self.policy.update_webhook(self, name=name, metadata=metadata)
python
{ "resource": "" }
q18142
AutoScaleClient._configure_manager
train
def _configure_manager(self):
    """Create the manager that handles autoscale operations."""
    self._manager = ScalingGroupManager(self,
            resource_class=ScalingGroup, response_key="group",
            uri_base="groups")
python
{ "resource": "" }
q18143
AutoScaleClient.replace
train
def replace(self, scaling_group, name, cooldown, min_entities,
        max_entities, metadata=None):
    """
    Replace an existing ScalingGroup configuration wholesale. Every
    attribute must be supplied; pass None to clear an optional one.
    """
    return self._manager.replace(scaling_group, name, cooldown,
            min_entities, max_entities, metadata=metadata)
python
{ "resource": "" }
q18144
AutoScaleClient.execute_policy
train
def execute_policy(self, scaling_group, policy):
    """Execute the given policy for the scaling group."""
    return self._manager.execute_policy(scaling_group=scaling_group,
            policy=policy)
python
{ "resource": "" }
q18145
AutoScaleClient.delete_webhook
train
def delete_webhook(self, scaling_group, policy, webhook):
    """Remove the given webhook from the policy."""
    return self._manager.delete_webhook(scaling_group, policy, webhook)
python
{ "resource": "" }
q18146
BaseResource.human_id
train
def human_id(self):
    """
    Return a slugified version of this object's name attribute (for
    bash completion), or None when no name is set or HUMAN_ID is
    disabled for the class.
    """
    name_key = self.NAME_ATTR
    if name_key in self.__dict__ and self.HUMAN_ID:
        return utils.to_slug(getattr(self, name_key))
    return None
python
{ "resource": "" }
q18147
BaseResource._add_details
train
def _add_details(self, info):
    """
    Set every key/value pair from the API response dict as an
    attribute on this object, normalizing key encodings first.
    """
    for key, val in six.iteritems(info):
        if isinstance(key, six.text_type) and six.PY2:
            # Python 2 attribute names must be byte strings.
            key = key.encode(pyrax.get_encoding())
        elif isinstance(key, bytes):
            key = key.decode("utf-8")
        setattr(self, key, val)
python
{ "resource": "" }
q18148
BaseResource.get
train
def get(self):
    """Fetch this object's details from the API, if supported."""
    # Mark as loaded up front so a failed fetch isn't retried forever.
    self.loaded = True
    if not hasattr(self.manager, "get"):
        return
    if not self.get_details:
        return
    fresh = self.manager.get(self)
    if fresh:
        self._add_details(fresh._info)
python
{ "resource": "" }
q18149
BaseResource.delete
train
def delete(self):
    """Delete this object via its manager, when deletion is supported."""
    # Mark as loaded first so we know a deletion was attempted.
    self.loaded = True
    if hasattr(self.manager, "delete"):
        self.manager.delete(self)
python
{ "resource": "" }
q18150
BaseClient.request
train
def request(self, uri, method, *args, **kwargs):
    """
    Formats the request into a dict representing the headers and body
    that will be used to make the API call.

    Raises the exception produced by exc.from_response() for any
    response with a 4xx/5xx status code.
    """
    if self.timeout:
        kwargs["timeout"] = self.timeout
    kwargs["verify"] = self.verify_ssl
    # Ensure a headers dict exists without clobbering one the caller passed.
    kwargs.setdefault("headers", kwargs.get("headers", {}))
    kwargs["headers"]["User-Agent"] = self.user_agent
    kwargs["headers"]["Accept"] = "application/json"
    if ("body" in kwargs) or ("data" in kwargs):
        # Default the payload type to JSON, but let a caller explicitly
        # suppress the Content-Type header by passing it as None.
        if "Content-Type" not in kwargs["headers"]:
            kwargs["headers"]["Content-Type"] = "application/json"
        elif kwargs["headers"]["Content-Type"] is None:
            del kwargs["headers"]["Content-Type"]
    # Allow subclasses to add their own headers
    self._add_custom_headers(kwargs["headers"])
    resp, body = pyrax.http.request(method, uri, *args, **kwargs)
    if resp.status_code >= 400:
        # Convert HTTP errors into the appropriate pyrax exception.
        raise exc.from_response(resp, body)
    return resp, body
python
{ "resource": "" }
q18151
BaseClient._time_request
train
def _time_request(self, uri, method, **kwargs): """Wraps the request call and records the elapsed time.""" start_time = time.time() resp, body = self.request(uri, method, **kwargs) self.times.append(("%s %s" % (method, uri), start_time, time.time())) return resp, body
python
{ "resource": "" }
q18152
BaseClient._api_request
train
def _api_request(self, uri, method, **kwargs):
    """
    Manages the request by adding any auth information, and retries
    the request after authenticating if the initial request returned
    an Unauthorized exception.
    """
    id_svc = self.identity
    # Authenticate first if we don't yet have a complete set of
    # credentials (management URL, token, and tenant ID).
    if not all((self.management_url, id_svc.token, id_svc.tenant_id)):
        id_svc.authenticate()
        if not self.management_url:
            # We've authenticated but no management_url has been set. This
            # indicates that the service is not available.
            raise exc.ServiceNotAvailable("The '%s' service is not available."
                    % self)
    if uri.startswith("http"):
        # Absolute URI: percent-quote each component except scheme/netloc.
        parsed = list(urllib.parse.urlparse(uri))
        for pos, item in enumerate(parsed):
            if pos < 2:
                # Don't escape the scheme or netloc
                continue
            parsed[pos] = _safe_quote(parsed[pos])
        safe_uri = urllib.parse.urlunparse(parsed)
    else:
        # Relative URI: prefix with this service's management URL.
        safe_uri = "%s%s" % (self.management_url, _safe_quote(uri))
    # Perform the request once. If we get a 401 back then it
    # might be because the auth token expired, so try to
    # re-authenticate and try again. If it still fails, bail.
    try:
        kwargs.setdefault("headers", {})["X-Auth-Token"] = id_svc.token
        if id_svc.tenant_id:
            kwargs["headers"]["X-Auth-Project-Id"] = id_svc.tenant_id
        resp, body = self._time_request(safe_uri, method, **kwargs)
        return resp, body
    except exc.Unauthorized as ex:
        try:
            id_svc.authenticate()
            kwargs["headers"]["X-Auth-Token"] = id_svc.token
            resp, body = self._time_request(safe_uri, method, **kwargs)
            return resp, body
        except exc.Unauthorized:
            # Surface the original 401, not the one from the retry.
            raise ex
python
{ "resource": "" }
q18153
BaseManager.head
train
def head(self, item):
    """Issue a HEAD request for the given item and return its headers."""
    return self._head("/%s/%s" % (self.uri_base, utils.get_id(item)))
python
{ "resource": "" }
q18154
BaseManager.get
train
def get(self, item):
    """Fetch and return a specific item."""
    return self._get("/%s/%s" % (self.uri_base, utils.get_id(item)))
python
{ "resource": "" }
q18155
BaseManager.delete
train
def delete(self, item):
    """Delete the specified item."""
    return self._delete("/%s/%s" % (self.uri_base, utils.get_id(item)))
python
{ "resource": "" }
q18156
BaseManager._data_from_response
train
def _data_from_response(self, resp_body, key=None): """ This works for most API responses, but some don't structure their listing responses the same way, so overriding this method allows subclasses to handle extraction for those outliers. """ if key: data = resp_body.get(key) else: data = resp_body.get(self.plural_response_key, resp_body) # NOTE(ja): some services, such as keystone returns values as list as # {"values": [ ... ]} unlike other services which just return the # list. if isinstance(data, dict): try: data = data["values"] except KeyError: pass return data
python
{ "resource": "" }
q18157
BaseManager._head
train
def _head(self, uri): """ Handles the communication with the API when performing a HEAD request on a specific resource managed by this class. Returns the headers contained in the response. """ resp, resp_body = self.api.method_head(uri) return resp
python
{ "resource": "" }
q18158
BaseManager._update
train
def _update(self, uri, body, **kwargs): """ Handles the communication with the API when updating a specific resource managed by this class. """ self.run_hooks("modify_body_for_update", body, **kwargs) resp, resp_body = self.api.method_put(uri, body=body) return resp_body
python
{ "resource": "" }
q18159
BaseManager.action
train
def action(self, item, action_type, body=None):
    """
    Generic handler for the 'action' API: several calls are lumped
    under it. POSTs {action_type: body} to the item's /action URI and
    returns the API response.

    'body' defaults to an empty dict; the previous mutable default
    argument ({}) was replaced with the None-sentinel idiom, with no
    change in behavior for callers.
    """
    if body is None:
        body = {}
    uri = "/%s/%s/action" % (self.uri_base, utils.get_id(item))
    action_body = {action_type: body}
    return self.api.method_post(uri, body=action_body)
python
{ "resource": "" }
q18160
Service._ep_for_region
train
def _ep_for_region(self, region): """ Given a region, returns the Endpoint for that region, or the Endpoint for the ALL region if no match is found. If no match is found, None is returned, and it is up to the calling method to handle it appropriately. """ rgn = region.upper() try: rgn_ep = [ep for ep in list(self.endpoints.values()) if ep.region.upper() == rgn][0] except IndexError: # See if there is an 'ALL' region. try: rgn_ep = [ep for ep in list(self.endpoints.values()) if ep.region.upper() == "ALL"][0] except IndexError: rgn_ep = None return rgn_ep
python
{ "resource": "" }
q18161
Service.get_client
train
def get_client(self, region):
    """
    Return the client instance for the given region, raising
    NoEndpointForRegion when the region has no endpoint.
    """
    ep = self._ep_for_region(region)
    if not ep:
        raise exc.NoEndpointForRegion("There is no endpoint defined for the "
                "region '%s' for the '%s' service." % (region,
                self.service_type))
    return ep.client
python
{ "resource": "" }
q18162
Endpoint.get_new_client
train
def get_new_client(self, public=True):
    """Return a freshly-created (non-cached) client for this endpoint."""
    return self._get_client(public=public, cached=False)
python
{ "resource": "" }
q18163
Endpoint.get
train
def get(self, url_type):
    """
    Return 'public_url' or 'private_url' depending on whether url_type
    is 'public' or 'private' (case-insensitive); raise ValueError for
    anything else.
    """
    lowered = url_type.lower()
    if lowered == "public":
        return self.public_url
    if lowered == "private":
        return self.private_url
    raise ValueError("Valid values are 'public' or 'private'; "
            "received '%s'." % url_type)
python
{ "resource": "" }
q18164
Endpoint._create_client
train
def _create_client(self, clt_class, url, public=True, special=False): """ Creates a client instance for the service. """ if self.service == "compute" and not special: # Novaclient requires different parameters. client = pyrax.connect_to_cloudservers(region=self.region, context=self.identity, verify_ssl=self.verify_ssl) client.identity = self.identity else: client = clt_class(self.identity, region_name=self.region, management_url=url, verify_ssl=self.verify_ssl) return client
python
{ "resource": "" }
q18165
BaseIdentity.set_credentials
train
def set_credentials(self, username, password=None, region=None, tenant_id=None, authenticate=False): """Sets the username and password directly.""" self.username = username self.password = password self.tenant_id = tenant_id if region: self.region = region if authenticate: self.authenticate()
python
{ "resource": "" }
q18166
BaseIdentity.auth_with_token
train
def auth_with_token(self, token, tenant_id=None, tenant_name=None): """ If a valid token is already known, this call uses it to generate the service catalog. """ resp, resp_body = self._call_token_auth(token, tenant_id, tenant_name) self._parse_response(resp_body) self.authenticated = True
python
{ "resource": "" }
q18167
BaseIdentity._format_credentials
train
def _format_credentials(self): """ Returns the current credentials in the format expected by the authentication service. """ tenant_name = self.tenant_name or self.username tenant_id = self.tenant_id or self.username return {"auth": {"passwordCredentials": {"username": tenant_name, "password": self.password, }, "tenantId": tenant_id}}
python
{ "resource": "" }
q18168
BaseIdentity.authenticate
train
def authenticate(self, username=None, password=None, api_key=None, tenant_id=None, connect=False): """ Using the supplied credentials, connects to the specified authentication endpoint and attempts to log in. Credentials can either be passed directly to this method, or previously-stored credentials can be used. If authentication is successful, the token and service catalog information is stored, and clients for each service and region are created. The 'connect' parameter is retained for backwards compatibility. It no longer has any effect. """ self.username = username or self.username or pyrax.get_setting( "username") # Different identity systems may pass these under inconsistent names. self.password = password or self.password or api_key or self.api_key self.api_key = api_key or self.api_key or self.password self.tenant_id = tenant_id or self.tenant_id or pyrax.get_setting( "tenant_id") creds = self._format_credentials() headers = {"Content-Type": "application/json", "Accept": "application/json", } resp, resp_body = self.method_post("tokens", data=creds, headers=headers, std_headers=False) if resp.status_code == 401: # Invalid authorization raise exc.AuthenticationFailed("Incorrect/unauthorized " "credentials received") elif 500 <= resp.status_code < 600: # Internal Server Error try: error_msg = resp_body[list(resp_body.keys())[0]]["message"] except (KeyError, AttributeError): error_msg = "Service Currently Unavailable" raise exc.InternalServerError(error_msg) elif resp.status_code > 299: try: msg = resp_body[list(resp_body.keys())[0]]["message"] except (KeyError, AttributeError): msg = None if msg: err = "%s - %s." % (resp.reason, msg) else: err = "%s." % resp.reason raise exc.AuthenticationFailed(err) self._parse_response(resp_body) self.authenticated = True
python
{ "resource": "" }
q18169
BaseIdentity.keyring_auth
train
def keyring_auth(self, username=None): """ Uses the keyring module to retrieve the user's password or api_key. """ if not keyring: # Module not installed raise exc.KeyringModuleNotInstalled("The 'keyring' Python module " "is not installed on this system.") if username is None: username = pyrax.get_setting("keyring_username") if not username: raise exc.KeyringUsernameMissing("No username specified for " "keyring authentication.") password = keyring.get_password("pyrax", username) if password is None: raise exc.KeyringPasswordNotFound("No password was found for the " "username '%s'." % username) style = self._creds_style or self._default_creds_style # Keyring username may be different than the credentials. Use the # existing username, if present; otherwise, use the supplied username. username = self.username or username if style == "apikey": return self.authenticate(username=username, api_key=password) else: return self.authenticate(username=username, password=password)
python
{ "resource": "" }
q18170
BaseIdentity.unauthenticate
train
def unauthenticate(self): """ Clears out any credentials, tokens, and service catalog info. """ self.username = "" self.password = "" self.tenant_id = "" self.tenant_name = "" self.token = "" self.expires = None self.region = "" self._creds_file = None self.api_key = "" self.services = utils.DotDict() self.regions = utils.DotDict() self.authenticated = False
python
{ "resource": "" }
q18171
BaseIdentity.get_token
train
def get_token(self, force=False): """ Returns the auth token, if it is valid. If not, calls the auth endpoint to get a new token. Passing 'True' to 'force' forces a call for a new token, even if there already is a valid token. """ self.authenticated = self._has_valid_token() if force or not self.authenticated: self.authenticate() return self.token
python
{ "resource": "" }
q18172
BaseIdentity._has_valid_token
train
def _has_valid_token(self): """ This only checks the token's existence and expiration. If it has been invalidated on the server, this method may indicate that the token is valid when it might actually not be. """ return bool(self.token and (self.expires > datetime.datetime.now()))
python
{ "resource": "" }
q18173
BaseIdentity.list_tokens
train
def list_tokens(self): """ ADMIN ONLY. Returns a dict containing tokens, endpoints, user info, and role metadata. """ resp, resp_body = self.method_get("tokens/%s" % self.token, admin=True) if resp.status_code in (401, 403): raise exc.AuthorizationFailure("You must be an admin to make this " "call.") return resp_body.get("access")
python
{ "resource": "" }
q18174
BaseIdentity.check_token
train
def check_token(self, token=None): """ ADMIN ONLY. Returns True or False, depending on whether the current token is valid. """ if token is None: token = self.token resp, resp_body = self.method_head("tokens/%s" % token, admin=True) if resp.status_code in (401, 403): raise exc.AuthorizationFailure("You must be an admin to make this " "call.") return 200 <= resp.status_code < 300
python
{ "resource": "" }
q18175
BaseIdentity.revoke_token
train
def revoke_token(self, token): """ ADMIN ONLY. Returns True or False, depending on whether deletion of the specified token was successful. """ resp, resp_body = self.method_delete("tokens/%s" % token, admin=True) if resp.status_code in (401, 403): raise exc.AuthorizationFailure("You must be an admin to make this " "call.") return 200 <= resp.status_code < 300
python
{ "resource": "" }
q18176
BaseIdentity.delete_user
train
def delete_user(self, user): """ ADMIN ONLY. Removes the user from the system. There is no 'undo' available, so you should be certain that the user specified is the user you wish to delete. """ user_id = utils.get_id(user) uri = "users/%s" % user_id resp, resp_body = self.method_delete(uri) if resp.status_code == 404: raise exc.UserNotFound("User '%s' does not exist." % user) elif resp.status_code in (401, 403): raise exc.AuthorizationFailure("You are not authorized to delete " "users.")
python
{ "resource": "" }
q18177
BaseIdentity.list_credentials
train
def list_credentials(self, user=None): """ Returns a user's non-password credentials. If no user is specified, the credentials for the currently authenticated user are returned. You cannot retrieve passwords by this or any other means. """ if not user: user = self.user user_id = utils.get_id(user) uri = "users/%s/OS-KSADM/credentials" % user_id resp, resp_body = self.method_get(uri) return resp_body.get("credentials")
python
{ "resource": "" }
q18178
BaseIdentity.create_tenant
train
def create_tenant(self, name, description=None, enabled=True): """ ADMIN ONLY. Creates a new tenant. """ data = {"tenant": { "name": name, "enabled": enabled, }} if description: data["tenant"]["description"] = description resp, resp_body = self.method_post("tenants", data=data) return Tenant(self, resp_body)
python
{ "resource": "" }
q18179
BaseIdentity.update_tenant
train
def update_tenant(self, tenant, name=None, description=None, enabled=True): """ ADMIN ONLY. Updates an existing tenant. """ tenant_id = utils.get_id(tenant) data = {"tenant": { "enabled": enabled, }} if name: data["tenant"]["name"] = name if description: data["tenant"]["description"] = description resp, resp_body = self.method_put("tenants/%s" % tenant_id, data=data) return Tenant(self, resp_body)
python
{ "resource": "" }
q18180
BaseIdentity.delete_tenant
train
def delete_tenant(self, tenant): """ ADMIN ONLY. Removes the tenant from the system. There is no 'undo' available, so you should be certain that the tenant specified is the tenant you wish to delete. """ tenant_id = utils.get_id(tenant) uri = "tenants/%s" % tenant_id resp, resp_body = self.method_delete(uri) if resp.status_code == 404: raise exc.TenantNotFound("Tenant '%s' does not exist." % tenant)
python
{ "resource": "" }
q18181
BaseIdentity.list_roles
train
def list_roles(self, service_id=None, limit=None, marker=None): """ Returns a list of all global roles for users, optionally limited by service. Pagination can be handled through the standard 'limit' and 'marker' parameters. """ uri = "OS-KSADM/roles" pagination_items = [] if service_id is not None: pagination_items.append("serviceId=%s" % service_id) if limit is not None: pagination_items.append("limit=%s" % limit) if marker is not None: pagination_items.append("marker=%s" % marker) pagination = "&".join(pagination_items) if pagination: uri = "%s?%s" % (uri, pagination) resp, resp_body = self.method_get(uri) roles = resp_body.get("roles", []) return [Role(self, role) for role in roles]
python
{ "resource": "" }
q18182
BaseIdentity.get_role
train
def get_role(self, role): """ Returns a Role object representing the specified parameter. The 'role' parameter can be either an existing Role object, or the ID of the role. If an invalid role is passed, a NotFound exception is raised. """ uri = "OS-KSADM/roles/%s" % utils.get_id(role) resp, resp_body = self.method_get(uri) role = Role(self, resp_body.get("role")) return role
python
{ "resource": "" }
q18183
BaseIdentity.add_role_to_user
train
def add_role_to_user(self, role, user): """ Adds the specified role to the specified user. There is no return value upon success. Passing a non-existent role or user raises a NotFound exception. """ uri = "users/%s/roles/OS-KSADM/%s" % (utils.get_id(user), utils.get_id(role)) resp, resp_body = self.method_put(uri)
python
{ "resource": "" }
q18184
BaseIdentity.delete_role_from_user
train
def delete_role_from_user(self, role, user): """ Deletes the specified role from the specified user. There is no return value upon success. Passing a non-existent role or user raises a NotFound exception. """ uri = "users/%s/roles/OS-KSADM/%s" % (utils.get_id(user), utils.get_id(role)) resp, resp_body = self.method_delete(uri)
python
{ "resource": "" }
q18185
CloudNetworkManager._create_body
train
def _create_body(self, name, label=None, cidr=None): """ Used to create the dict required to create a network. Accepts either 'label' or 'name' as the keyword parameter for the label attribute. """ label = label or name body = {"network": { "label": label, "cidr": cidr, }} return body
python
{ "resource": "" }
q18186
CloudNetworkClient._configure_manager
train
def _configure_manager(self): """ Creates the Manager instance to handle networks. """ self._manager = CloudNetworkManager(self, resource_class=CloudNetwork, response_key="network", uri_base="os-networksv2")
python
{ "resource": "" }
q18187
CloudNetworkClient.find_network_by_label
train
def find_network_by_label(self, label): """ This is inefficient; it gets all the networks and then filters on the client side to find the matching name. """ networks = self.list() match = [network for network in networks if network.label == label] if not match: raise exc.NetworkNotFound("No network with the label '%s' exists" % label) elif len(match) > 1: raise exc.NetworkLabelNotUnique("There were %s matches for the label " "'%s'." % (len(match), label)) return match[0]
python
{ "resource": "" }
q18188
CloudNetworkClient.get_server_networks
train
def get_server_networks(self, network, public=False, private=False, key=None): """ Creates the dict of network UUIDs required by Cloud Servers when creating a new server with isolated networks. By default, the UUID values are returned with the key of "net-id", which is what novaclient expects. Other tools may require different values, such as 'uuid'. If that is the case, pass the desired key as the 'key' parameter. By default only this network is included. If you wish to create a server that has either the public (internet) or private (ServiceNet) networks, you have to pass those parameters in with values of True. """ return _get_server_networks(network, public=public, private=private, key=key)
python
{ "resource": "" }
q18189
MathGlyph.round
train
def round(self, digits=None): """round the geometry.""" copiedGlyph = self.copyWithoutMathSubObjects() # misc copiedGlyph.width = _roundNumber(self.width, digits) copiedGlyph.height = _roundNumber(self.height, digits) # contours copiedGlyph.contours = [] if self.contours: copiedGlyph.contours = _roundContours(self.contours, digits) # components copiedGlyph.components = [] if self.components: copiedGlyph.components = _roundComponents(self.components, digits) # guidelines copiedGlyph.guidelines = [] if self.guidelines: copiedGlyph.guidelines = _roundGuidelines(self.guidelines, digits) # anchors copiedGlyph.anchors = [] if self.anchors: copiedGlyph.anchors = _roundAnchors(self.anchors, digits) # image copiedGlyph.image = None if self.image: copiedGlyph.image = _roundImage(self.image, digits) return copiedGlyph
python
{ "resource": "" }
q18190
MathGlyph.drawPoints
train
def drawPoints(self, pointPen, filterRedundantPoints=False): """draw self using pointPen""" if filterRedundantPoints: pointPen = FilterRedundantPointPen(pointPen) for contour in self.contours: pointPen.beginPath(identifier=contour["identifier"]) for segmentType, pt, smooth, name, identifier in contour["points"]: pointPen.addPoint(pt=pt, segmentType=segmentType, smooth=smooth, name=name, identifier=identifier) pointPen.endPath() for component in self.components: pointPen.addComponent(component["baseGlyph"], component["transformation"], identifier=component["identifier"])
python
{ "resource": "" }
q18191
MathGlyph.draw
train
def draw(self, pen, filterRedundantPoints=False): """draw self using pen""" from fontTools.pens.pointPen import PointToSegmentPen pointPen = PointToSegmentPen(pen) self.drawPoints(pointPen, filterRedundantPoints=filterRedundantPoints)
python
{ "resource": "" }
q18192
matrixToMathTransform
train
def matrixToMathTransform(matrix): """ Take a 6-tuple and return a ShallowTransform object.""" if isinstance(matrix, ShallowTransform): return matrix off, scl, rot = MathTransform(matrix).decompose() return ShallowTransform(off, scl, rot)
python
{ "resource": "" }
q18193
mathTransformToMatrix
train
def mathTransformToMatrix(mathTransform): """ Take a ShallowTransform object and return a 6-tuple. """ m = MathTransform().compose(mathTransform.offset, mathTransform.scale, mathTransform.rotation) return tuple(m)
python
{ "resource": "" }
q18194
_linearInterpolationTransformMatrix
train
def _linearInterpolationTransformMatrix(matrix1, matrix2, value): """ Linear, 'oldstyle' interpolation of the transform matrix.""" return tuple(_interpolateValue(matrix1[i], matrix2[i], value) for i in range(len(matrix1)))
python
{ "resource": "" }
q18195
_polarDecomposeInterpolationTransformation
train
def _polarDecomposeInterpolationTransformation(matrix1, matrix2, value): """ Interpolate using the MathTransform method. """ m1 = MathTransform(matrix1) m2 = MathTransform(matrix2) return tuple(m1.interpolate(m2, value))
python
{ "resource": "" }
q18196
Facts.facts
train
def facts(self): """Iterate over the asserted Facts.""" fact = lib.EnvGetNextFact(self._env, ffi.NULL) while fact != ffi.NULL: yield new_fact(self._env, fact) fact = lib.EnvGetNextFact(self._env, fact)
python
{ "resource": "" }
q18197
Facts.templates
train
def templates(self): """Iterate over the defined Templates.""" template = lib.EnvGetNextDeftemplate(self._env, ffi.NULL) while template != ffi.NULL: yield Template(self._env, template) template = lib.EnvGetNextDeftemplate(self._env, template)
python
{ "resource": "" }
q18198
Facts.find_template
train
def find_template(self, name): """Find the Template by its name.""" deftemplate = lib.EnvFindDeftemplate(self._env, name.encode()) if deftemplate == ffi.NULL: raise LookupError("Template '%s' not found" % name) return Template(self._env, deftemplate)
python
{ "resource": "" }
q18199
Facts.assert_string
train
def assert_string(self, string): """Assert a fact as string.""" fact = lib.EnvAssertString(self._env, string.encode()) if fact == ffi.NULL: raise CLIPSError(self._env) return new_fact(self._env, fact)
python
{ "resource": "" }