_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q18000
option_chooser
train
def option_chooser(options, attr=None):
    """Given an iterable, enumerate its contents for a user to choose from.

    If the optional `attr` is not None, that attribute in each iterated
    object will be printed.

    This function will exit the program if the user chooses the escape
    option.
    """
    # Start at -1 so an empty iterable still yields a valid escape option
    # of 0 (previously 'num' was unbound and raised NameError).
    num = -1
    for num, option in enumerate(options):
        if attr:
            print("%s: %s" % (num, getattr(option, attr)))
        else:
            print("%s: %s" % (num, option))
    # Add an escape option
    escape_opt = num + 1
    print("%s: I want to exit!" % escape_opt)
    choice = six.moves.input("Selection: ")
    try:
        ichoice = int(choice)
        # Reject out-of-range selections, including negative numbers,
        # which the original upper-bound-only check let through.
        if not 0 <= ichoice <= escape_opt:
            raise ValueError
    except ValueError:
        print("Valid entries are the numbers 0-%s. Received '%s'." %
                (escape_opt, choice))
        sys.exit()
    if ichoice == escape_opt:
        print("Bye!")
        sys.exit()
    return ichoice
python
{ "resource": "" }
q18001
assure_queue
train
def assure_queue(fnc):
    """
    Converts a queue ID or name passed as the 'queue' parameter to a
    Queue object.
    """
    @wraps(fnc)
    def _wrapped(self, queue, *args, **kwargs):
        if isinstance(queue, Queue):
            return fnc(self, queue, *args, **kwargs)
        # Anything else is treated as an ID/name and resolved first.
        resolved = self._manager.get(queue)
        return fnc(self, resolved, *args, **kwargs)
    return _wrapped
python
{ "resource": "" }
q18002
Queue.list
train
def list(self, include_claimed=False, echo=False, marker=None, limit=None):
    """
    Returns a list of messages for this queue. By default only unclaimed
    messages are returned; if you want claimed messages included, pass
    `include_claimed=True`. Also, the requester's own messages are not
    returned by default; if you want them included, pass `echo=True`.

    The 'marker' and 'limit' parameters are used to control pagination of
    results. 'Marker' is the ID of the last message returned, while
    'limit' controls the number of messages returned per request
    (default=20).
    """
    # Pure delegation to the queue's message manager.
    return self._message_manager.list(include_claimed=include_claimed,
            echo=echo, marker=marker, limit=limit)
python
{ "resource": "" }
q18003
Queue.list_by_claim
train
def list_by_claim(self, claim):
    """
    Returns a list of all the messages from this queue that have been
    claimed by the specified claim. The claim can be either a claim ID or
    a QueueClaim object.
    """
    # Resolve a claim ID into a QueueClaim object when necessary.
    qclaim = (claim if isinstance(claim, QueueClaim)
            else self._claim_manager.get(claim))
    return qclaim.messages
python
{ "resource": "" }
q18004
QueueMessage._add_details
train
def _add_details(self, info):
    """
    The 'id' and 'claim_id' attributes are not supplied directly, but
    included as part of the 'href' value.
    """
    super(QueueMessage, self)._add_details(info)
    href = self.href
    if href is None:
        return
    parsed = urllib.parse.urlparse(href)
    # The message ID is the final path segment of the href.
    self.id = parsed.path.rsplit("/", 1)[-1]
    qs = parsed.query
    if qs:
        # The claim ID follows 'claim_id=' in the query string.
        self.claim_id = qs.split("claim_id=")[-1]
python
{ "resource": "" }
q18005
QueueClaim._add_details
train
def _add_details(self, info):
    """
    The 'id' attribute is not supplied directly, but included as part of
    the 'href' value. Also, convert the dicts for messages into
    QueueMessage objects.
    """
    # Pull the raw message dicts out before the base class processes info.
    raw_messages = info.pop("messages", [])
    super(QueueClaim, self)._add_details(info)
    # The claim ID is the final path segment of the href.
    self.id = urllib.parse.urlparse(self.href).path.rsplit("/", 1)[-1]
    msg_mgr = self.manager._message_manager
    self.messages = [QueueMessage(msg_mgr, item) for item in raw_messages]
python
{ "resource": "" }
q18006
QueueMessageManager._iterate_list
train
def _iterate_list(self, include_claimed, echo, marker, limit):
    """
    Recursive method to work around the hard limit of 10 items per call.

    Fetches up to MSG_LIMIT messages per request and follows the
    pagination marker until either the requested 'limit' is satisfied or
    no marker is returned.
    """
    ret = []
    if limit is None:
        this_limit = MSG_LIMIT
    else:
        this_limit = min(MSG_LIMIT, limit)
        # Track how many messages remain to be fetched after this call.
        limit = limit - this_limit
    uri = "/%s?include_claimed=%s&echo=%s" % (self.uri_base,
            json.dumps(include_claimed), json.dumps(echo))
    qs_parts = []
    if marker is not None:
        qs_parts.append("marker=%s" % marker)
    if this_limit is not None:
        qs_parts.append("limit=%s" % this_limit)
    if qs_parts:
        uri = "%s&%s" % (uri, "&".join(qs_parts))
    resp, resp_body = self._list(uri, return_raw=True)
    if not resp_body:
        return ret
    messages = resp_body.get(self.plural_response_key, [])
    ret = [QueueMessage(manager=self, info=item) for item in messages]
    marker = _parse_marker(resp_body)
    # Recurse while more messages are wanted and available. (A dead
    # 'loop' counter that was incremented but never read has been
    # removed.)
    if ((limit is None) or limit > 0) and marker:
        ret.extend(self._iterate_list(include_claimed, echo, marker, limit))
    return ret
python
{ "resource": "" }
q18007
QueueMessageManager.delete
train
def delete(self, msg, claim_id=None):
    """
    Deletes the specified message from its queue. If the message has been
    claimed, the ID of that claim must be passed as the 'claim_id'
    parameter.
    """
    msg_id = utils.get_id(msg)
    uri = "/%s/%s" % (self.uri_base, msg_id)
    if claim_id:
        # Claimed messages require the claim ID in the query string.
        uri = "%s?claim_id=%s" % (uri, claim_id)
    return self._delete(uri)
python
{ "resource": "" }
q18008
QueueMessageManager.list_by_ids
train
def list_by_ids(self, ids):
    """
    If you wish to retrieve a list of messages from this queue and know
    the IDs of those messages, you can pass in a list of those IDs, and
    only the matching messages will be returned. This avoids pulling down
    all the messages in a queue and filtering on the client side.
    """
    ids = utils.coerce_to_list(ids)
    uri = "/%s?ids=%s" % (self.uri_base, ",".join(ids))
    # The API is not consistent in how it returns message lists, so this
    # workaround is needed: temporarily blank the plural response key so
    # the base _list() call parses this response shape correctly.
    curr_prkey = self.plural_response_key
    self.plural_response_key = ""
    try:
        ret = self._list(uri)
    finally:
        # Always restore the key, even if the request raises, so the
        # manager is not left in a broken state for later calls.
        self.plural_response_key = curr_prkey
    return ret
python
{ "resource": "" }
q18009
QueueMessageManager.delete_by_ids
train
def delete_by_ids(self, ids):
    """
    Deletes the messages whose IDs are passed in from this queue.
    """
    id_list = utils.coerce_to_list(ids)
    uri = "/%s?ids=%s" % (self.uri_base, ",".join(id_list))
    return self.api.method_delete(uri)
python
{ "resource": "" }
q18010
QueueManager.get
train
def get(self, id_):
    """
    Need to customize, since Queues are not returned with normal response
    bodies.

    Raises exc.NotFound when no queue with the given ID/name exists.
    """
    if self.api.queue_exists(id_):
        # The inner dict mimics a normal API response body. The resource
        # attribute must be named "id": the trailing underscore on the
        # parameter exists only to avoid shadowing the builtin, and must
        # not leak into the response key.
        return Queue(self, {"queue": {"name": id_, "id": id_}}, key="queue")
    raise exc.NotFound("The queue '%s' does not exist." % id_)
python
{ "resource": "" }
q18011
QueueManager.get_stats
train
def get_stats(self, queue):
    """
    Returns the message stats for the specified queue.
    """
    uri = "/%s/%s/stats" % (self.uri_base, utils.get_id(queue))
    _, body = self.api.method_get(uri)
    return body.get("messages")
python
{ "resource": "" }
q18012
QueueManager.get_metadata
train
def get_metadata(self, queue):
    """
    Returns the metadata for the specified queue.
    """
    uri = "/%s/%s/metadata" % (self.uri_base, utils.get_id(queue))
    _, body = self.api.method_get(uri)
    return body
python
{ "resource": "" }
q18013
QueueClient._add_custom_headers
train
def _add_custom_headers(self, dct): """ Add the Client-ID header required by Cloud Queues """ if self.client_id is None: self.client_id = os.environ.get("CLOUD_QUEUES_ID") if self.client_id: dct["Client-ID"] = self.client_id
python
{ "resource": "" }
q18014
QueueClient._api_request
train
def _api_request(self, uri, method, **kwargs):
    """
    Any request that involves messages must define the client ID. This
    handles all failures due to lack of client ID and raises the
    appropriate exception.
    """
    try:
        return super(QueueClient, self)._api_request(uri, method, **kwargs)
    except exc.BadRequest as e:
        # Only translate the specific "missing Client-ID" failure.
        missing_client_id = ((e.code == "400") and
                (e.message == 'The "Client-ID" header is required.'))
        if not missing_client_id:
            raise
        raise exc.QueueClientIDNotDefined("You must supply a client ID "
                "to work with Queue messages.")
python
{ "resource": "" }
q18015
QueueClient.queue_exists
train
def queue_exists(self, name):
    """
    Returns True or False, depending on the existence of the named queue.
    """
    try:
        # A HEAD request raises NotFound when the queue is absent.
        self._manager.head(name)
    except exc.NotFound:
        return False
    return True
python
{ "resource": "" }
q18016
QueueClient.list_messages
train
def list_messages(self, queue, include_claimed=False, echo=False,
        marker=None, limit=None):
    """
    Returns a list of messages for the specified queue. By default only
    unclaimed messages are returned; if you want claimed messages
    included, pass `include_claimed=True`. Also, the requester's own
    messages are not returned by default; if you want them included, pass
    `echo=True`.

    The 'marker' and 'limit' parameters are used to control pagination of
    results. 'Marker' is the ID of the last message returned, while
    'limit' controls the number of messages returned per request
    (default=20).
    """
    # Pure delegation to the Queue object.
    return queue.list(include_claimed=include_claimed, echo=echo,
            marker=marker, limit=limit)
python
{ "resource": "" }
q18017
QueueClient.claim_messages
train
def claim_messages(self, queue, ttl, grace, count=None):
    """
    Claims up to `count` unclaimed messages from the specified queue. If
    count is not specified, the default is to claim 10 messages.

    The `ttl` parameter specifies how long the server should wait before
    releasing the claim. The ttl value MUST be between 60 and 43200
    seconds.

    The `grace` parameter is the message grace period in seconds. The
    value of grace MUST be between 60 and 43200 seconds. The server
    extends the lifetime of claimed messages to be at least as long as
    the lifetime of the claim itself, plus a specified grace period to
    deal with crashed workers (up to 1209600 or 14 days including claim
    lifetime). If a claimed message would normally live longer than the
    grace period, its expiration will not be adjusted.

    Returns a QueueClaim object, whose 'messages' attribute contains the
    list of QueueMessage objects representing the claimed messages.
    """
    # Pure delegation to the Queue object.
    return queue.claim_messages(ttl, grace, count=count)
python
{ "resource": "" }
q18018
CloudCDNClient.list_services
train
def list_services(self, limit=None, marker=None):
    """List CDN services.

    'limit' and 'marker' control pagination of the returned services.
    """
    # Pure delegation to the services manager.
    return self._services_manager.list(limit=limit, marker=marker)
python
{ "resource": "" }
q18019
CloudCDNClient.create_service
train
def create_service(self, name, flavor_id, domains, origins,
        restrictions=None, caching=None):
    """Create a new CDN service.

    Arguments:
        name: The name of the service.
        flavor_id: The ID of the flavor to use for this service.
        domains: A list of dictionaries, each of which has a required key
            "domain" and optional key "protocol" (the default protocol is
            http).
        origins: A list of dictionaries, each of which has a required key
            "origin" which is the URL or IP address to pull origin
            content from. Optional keys include "port" to use a port
            other than the default of 80, and "ssl" to enable SSL, which
            is disabled by default.
        restrictions: An optional list of restriction dicts — presumably
            access restrictions for the service; confirm the expected
            structure against the CDN API documentation.
        caching: An optional list of caching-rule dicts — NOTE(review):
            the original docstring was truncated here; confirm the
            expected structure against the CDN API documentation.
    """
    return self._services_manager.create(name, flavor_id, domains,
            origins, restrictions, caching)
python
{ "resource": "" }
q18020
CloudCDNClient.delete_assets
train
def delete_assets(self, service_id, url=None, all=False):
    """Delete CDN assets

    Arguments:
        service_id: The ID of the service to delete from.
        url: The URL at which to delete assets
        all: When True, delete all assets associated with the service_id.

    You cannot specify both url and all.
    """
    # Pure delegation to the services manager; returns None.
    self._services_manager.delete_assets(service_id, url, all)
python
{ "resource": "" }
q18021
ServiceCatalog.url_for
train
def url_for(self, attr=None, filter_value=None,
        service_type=None, endpoint_type="publicURL",
        service_name=None, volume_service_name=None):
    """Fetches the public URL from the given service for a particular
    endpoint attribute. If none given, returns the first. See tests for
    sample service catalog.

    Raises exc.EndpointNotFound when nothing matches, and
    exc.AmbiguousEndpoints when more than one endpoint matches.
    """
    matching_endpoints = []
    # We don't always get a service catalog back ...
    if "serviceCatalog" not in self.catalog["access"]:
        return None
    # Full catalog ...
    catalog = self.catalog["access"]["serviceCatalog"]
    for service in catalog:
        if service.get("type") != service_type:
            continue
        endpoints = service["endpoints"]
        for endpoint in endpoints:
            # When no filter_value is given, every endpoint of the
            # matching service type is a candidate.
            if not filter_value or endpoint.get(attr) == filter_value:
                # Tag each candidate with its service name for callers.
                endpoint["serviceName"] = service.get("name")
                matching_endpoints.append(endpoint)
    if not matching_endpoints:
        raise exc.EndpointNotFound()
    elif len(matching_endpoints) > 1:
        raise exc.AmbiguousEndpoints(endpoints=matching_endpoints)
    else:
        return matching_endpoints[0][endpoint_type]
python
{ "resource": "" }
q18022
CloudLoadBalancer._add_details
train
def _add_details(self, info):
    """Override the base behavior to add Nodes, VirtualIPs, etc."""
    for (key, val) in six.iteritems(info):
        if key == "nodes":
            # Wrap raw node dicts in Node objects.
            setattr(self, key, [Node(parent=self, **nd) for nd in val])
        elif key == "sessionPersistence":
            setattr(self, key, val['persistenceType'])
        elif key == "cluster":
            setattr(self, key, val['name'])
        elif key == "virtualIps":
            # Stored under a snake_case attribute name.
            setattr(self, "virtual_ips",
                    [VirtualIP(parent=self, **vip) for vip in val])
        else:
            setattr(self, key, val)
python
{ "resource": "" }
q18023
CloudLoadBalancer.set_metadata_for_node
train
def set_metadata_for_node(self, node, metadata):
    """
    Sets the metadata for the specified node to the supplied dictionary
    of values. Any existing metadata is cleared.
    """
    # Delegates to the manager, targeting the node rather than the LB.
    return self.manager.set_metadata(self, metadata, node=node)
python
{ "resource": "" }
q18024
CloudLoadBalancer.update_metadata_for_node
train
def update_metadata_for_node(self, node, metadata):
    """
    Updates the existing metadata for the specified node with the
    supplied dictionary.
    """
    # Delegates to the manager, targeting the node rather than the LB.
    return self.manager.update_metadata(self, metadata, node=node)
python
{ "resource": "" }
q18025
CloudLoadBalancerManager._create_body
train
def _create_body(self, name, port=None, protocol=None, nodes=None,
        virtual_ips=None, algorithm=None, halfClosed=None, accessList=None,
        connectionLogging=None, connectionThrottle=None, healthMonitor=None,
        metadata=None, timeout=None, sessionPersistence=None,
        httpsRedirect=None):
    """
    Used to create the dict required to create a load balancer instance.

    Raises MissingLoadBalancerParameters if any of virtual_ips, port, or
    protocol is missing, and InvalidNodeCondition if any node's condition
    is not 'ENABLED' or 'DISABLED'.
    """
    required = (virtual_ips, port, protocol)
    if not all(required):
        raise exc.MissingLoadBalancerParameters("Load Balancer creation "
                "requires at least one virtual IP, a protocol, and a port.")
    nodes = utils.coerce_to_list(nodes)
    virtual_ips = utils.coerce_to_list(virtual_ips)
    # Only 'ENABLED' and 'DISABLED' are acceptable initial conditions.
    bad_conditions = [node.condition for node in nodes
            if node.condition.upper() not in ("ENABLED", "DISABLED")]
    if bad_conditions:
        raise exc.InvalidNodeCondition("Nodes for new load balancer must be "
                "created in either 'ENABLED' or 'DISABLED' condition; "
                "received the following invalid conditions: %s" %
                ", ".join(set(bad_conditions)))
    node_dicts = [nd.to_dict() for nd in nodes]
    vip_dicts = [vip.to_dict() for vip in virtual_ips]
    body = {"loadBalancer": {
            "name": name,
            "port": port,
            "protocol": protocol,
            "nodes": node_dicts,
            "virtualIps": vip_dicts,
            # The API requires an algorithm; default to RANDOM.
            "algorithm": algorithm or "RANDOM",
            "halfClosed": halfClosed,
            "accessList": accessList,
            "connectionLogging": connectionLogging,
            "connectionThrottle": connectionThrottle,
            "healthMonitor": healthMonitor,
            "metadata": metadata,
            "timeout": timeout,
            "sessionPersistence": sessionPersistence,
            "httpsRedirect": httpsRedirect,
            }}
    return body
python
{ "resource": "" }
q18026
CloudLoadBalancerManager.add_nodes
train
def add_nodes(self, lb, nodes):
    """Adds the list of nodes to the specified load balancer."""
    # Accept a single node as well as a list/tuple of nodes.
    node_list = nodes if isinstance(nodes, (list, tuple)) else [nodes]
    payload = {"nodes": [nd.to_dict() for nd in node_list]}
    return self.api.method_post("/loadbalancers/%s/nodes" % lb.id,
            body=payload)
python
{ "resource": "" }
q18027
CloudLoadBalancerManager.delete_node
train
def delete_node(self, loadbalancer, node):
    """Removes the node from its load balancer."""
    lb = node.parent
    if not lb:
        raise exc.UnattachedNode("No parent Load Balancer for this node "
                "could be determined.")
    return self.api.method_delete("/loadbalancers/%s/nodes/%s" %
            (lb.id, node.id))
python
{ "resource": "" }
q18028
CloudLoadBalancerManager.add_virtualip
train
def add_virtualip(self, lb, vip):
    """Adds the VirtualIP to the specified load balancer."""
    uri = "/loadbalancers/%s/virtualips" % lb.id
    return self.api.method_post(uri, body=vip.to_dict())
python
{ "resource": "" }
q18029
CloudLoadBalancerManager.delete_virtualip
train
def delete_virtualip(self, loadbalancer, vip):
    """Deletes the VirtualIP from its load balancer."""
    lb = vip.parent
    if not lb:
        raise exc.UnattachedVirtualIP("No parent Load Balancer for this "
                "VirtualIP could be determined.")
    return self.api.method_delete("/loadbalancers/%s/virtualips/%s" %
            (lb.id, vip.id))
python
{ "resource": "" }
q18030
CloudLoadBalancerManager.add_access_list
train
def add_access_list(self, loadbalancer, access_list):
    """
    Adds the access list provided to the load balancer. The 'access_list'
    should be a list of dicts in the following format:

        [{"address": "192.0.43.10", "type": "DENY"},
         {"address": "192.0.43.11", "type": "ALLOW"},
         ...
         {"address": "192.0.43.99", "type": "DENY"},
        ]

    If no access list exists, it is created. If an access list already
    exists, it is updated with the provided list.
    """
    uri = "/loadbalancers/%s/accesslist" % utils.get_id(loadbalancer)
    _, body = self.api.method_post(uri, body={"accessList": access_list})
    return body
python
{ "resource": "" }
q18031
CloudLoadBalancerManager.delete_access_list
train
def delete_access_list(self, loadbalancer):
    """
    Removes the access list from this load balancer.
    """
    uri = "/loadbalancers/%s/accesslist" % utils.get_id(loadbalancer)
    _, body = self.api.method_delete(uri)
    return body
python
{ "resource": "" }
q18032
CloudLoadBalancerManager.get_health_monitor
train
def get_health_monitor(self, loadbalancer):
    """
    Returns a dict representing the health monitor for the load balancer.
    If no monitor has been configured, returns an empty dict.
    """
    uri = "/loadbalancers/%s/healthmonitor" % utils.get_id(loadbalancer)
    _, body = self.api.method_get(uri)
    return body.get("healthMonitor", {})
python
{ "resource": "" }
q18033
CloudLoadBalancerManager.add_connection_throttle
train
def add_connection_throttle(self, loadbalancer, maxConnectionRate=None,
        maxConnections=None, minConnections=None, rateInterval=None):
    """
    Creates or updates the connection throttling information for the load
    balancer. When first creating the connection throttle, all 4
    parameters must be supplied. When updating an existing connection
    throttle, at least one of the parameters must be supplied.
    """
    # Only include the settings that were actually supplied (truthy).
    settings = {}
    for setting_name, setting_val in (
            ("maxConnectionRate", maxConnectionRate),
            ("maxConnections", maxConnections),
            ("minConnections", minConnections),
            ("rateInterval", rateInterval)):
        if setting_val:
            settings[setting_name] = setting_val
    uri = "/loadbalancers/%s/connectionthrottle" % utils.get_id(loadbalancer)
    _, body = self.api.method_put(uri,
            body={"connectionThrottle": settings})
    return body
python
{ "resource": "" }
q18034
CloudLoadBalancerManager.get_ssl_termination
train
def get_ssl_termination(self, loadbalancer):
    """
    Returns a dict representing the SSL termination configuration for the
    load balancer. If SSL termination has not been configured, returns an
    empty dict.
    """
    uri = "/loadbalancers/%s/ssltermination" % utils.get_id(loadbalancer)
    try:
        _, body = self.api.method_get(uri)
    except exc.NotFound:
        # For some reason, instead of returning an empty dict like the
        # other API GET calls, this raises a 404.
        return {}
    return body.get("sslTermination", {})
python
{ "resource": "" }
q18035
CloudLoadBalancerManager.get_metadata
train
def get_metadata(self, loadbalancer, node=None, raw=False):
    """
    Returns the current metadata for the load balancer. If 'node' is
    provided, returns the current metadata for that node.
    """
    lb_id = utils.get_id(loadbalancer)
    if node:
        uri = "/loadbalancers/%s/nodes/%s/metadata" % (lb_id,
                utils.get_id(node))
    else:
        uri = "/loadbalancers/%s/metadata" % lb_id
    _, body = self.api.method_get(uri)
    meta = body.get("metadata", [])
    if raw:
        # Return the raw list of {"id", "key", "value"} dicts.
        return meta
    return {itm["key"]: itm["value"] for itm in meta}
python
{ "resource": "" }
q18036
CloudLoadBalancerManager.set_metadata
train
def set_metadata(self, loadbalancer, metadata, node=None):
    """
    Sets the metadata for the load balancer to the supplied dictionary of
    values. Any existing metadata is cleared. If 'node' is provided, the
    metadata for that node is set instead of for the load balancer.
    """
    # Delete any existing metadata
    self.delete_metadata(loadbalancer, node=node)
    # Convert the metadata dict into the list format
    metadata_list = [{"key": k, "value": v} for k, v in metadata.items()]
    lb_id = utils.get_id(loadbalancer)
    if node:
        uri = "/loadbalancers/%s/nodes/%s/metadata" % (lb_id,
                utils.get_id(node))
    else:
        uri = "/loadbalancers/%s/metadata" % lb_id
    _, body = self.api.method_post(uri, body={"metadata": metadata_list})
    return body
python
{ "resource": "" }
q18037
CloudLoadBalancerManager.update_metadata
train
def update_metadata(self, loadbalancer, metadata, node=None):
    """
    Updates the existing metadata with the supplied dictionary. If 'node'
    is supplied, the metadata for that node is updated instead of for the
    load balancer.
    """
    # Get the existing metadata. Pass node=node so updates target the
    # node's metadata, not the load balancer's (the previous version
    # omitted the node here).
    md = self.get_metadata(loadbalancer, node=node, raw=True)
    id_lookup = dict([(itm["key"], itm["id"]) for itm in md])
    metadata_list = []
    # Updates must be done individually
    for key, val in metadata.items():
        try:
            meta_id = id_lookup[key]
            if node:
                uri = "/loadbalancers/%s/nodes/%s/metadata/%s" % (
                        utils.get_id(loadbalancer), utils.get_id(node),
                        meta_id)
            else:
                # The metadata item ID must be part of the URI, matching
                # the node branch above; the previous version dropped it
                # and so PUT against the wrong resource.
                uri = "/loadbalancers/%s/metadata/%s" % (
                        utils.get_id(loadbalancer), meta_id)
            req_body = {"meta": {"value": val}}
            resp, body = self.api.method_put(uri, body=req_body)
        except KeyError:
            # Not an existing key; add to metadata_list
            metadata_list.append({"key": key, "value": val})
    if metadata_list:
        # New items; POST them
        if node:
            uri = "/loadbalancers/%s/nodes/%s/metadata" % (
                    utils.get_id(loadbalancer), utils.get_id(node))
        else:
            uri = "/loadbalancers/%s/metadata" % utils.get_id(loadbalancer)
        req_body = {"metadata": metadata_list}
        resp, body = self.api.method_post(uri, body=req_body)
python
{ "resource": "" }
q18038
CloudLoadBalancerManager.delete_metadata
train
def delete_metadata(self, loadbalancer, keys=None, node=None):
    """
    Deletes metadata items specified by the 'keys' parameter. If no value
    for 'keys' is provided, all metadata is deleted. If 'node' is
    supplied, the metadata for that node is deleted instead of the load
    balancer.
    """
    # Allow a single key to be passed without wrapping it in a list.
    if keys and not isinstance(keys, (list, tuple)):
        keys = [keys]
    md = self.get_metadata(loadbalancer, node=node, raw=True)
    if keys:
        # Restrict deletion to the requested keys only.
        md = [dct for dct in md if dct["key"] in keys]
    if not md:
        # Nothing to do; log it? Raise an error?
        return
    # The API deletes by metadata item ID, passed as repeated query args.
    id_list = "&".join(["id=%s" % itm["id"] for itm in md])
    if node:
        uri = "/loadbalancers/%s/nodes/%s/metadata?%s" % (
                utils.get_id(loadbalancer), utils.get_id(node), id_list)
    else:
        uri = "/loadbalancers/%s/metadata?%s" % (
                utils.get_id(loadbalancer), id_list)
    resp, body = self.api.method_delete(uri)
    return body
python
{ "resource": "" }
q18039
CloudLoadBalancerManager.set_error_page
train
def set_error_page(self, loadbalancer, html):
    """
    A single custom error page may be added per account load balancer
    with an HTTP protocol. Page updates will override existing content.
    If a custom error page is deleted, or the load balancer is changed to
    a non-HTTP protocol, the default error page will be restored.
    """
    uri = "/loadbalancers/%s/errorpage" % utils.get_id(loadbalancer)
    payload = {"errorpage": {"content": html}}
    _, body = self.api.method_put(uri, body=payload)
    return body
python
{ "resource": "" }
q18040
CloudLoadBalancerManager.get_session_persistence
train
def get_session_persistence(self, loadbalancer):
    """
    Returns the session persistence setting for the given load balancer.
    """
    uri = "/loadbalancers/%s/sessionpersistence" % utils.get_id(loadbalancer)
    _, body = self.api.method_get(uri)
    # An unconfigured LB yields an empty persistence type.
    return body["sessionPersistence"].get("persistenceType", "")
python
{ "resource": "" }
q18041
CloudLoadBalancerManager.set_session_persistence
train
def set_session_persistence(self, loadbalancer, val):
    """
    Sets the session persistence for the given load balancer.
    """
    uri = "/loadbalancers/%s/sessionpersistence" % utils.get_id(loadbalancer)
    # The API expects the persistence type in upper case.
    payload = {"sessionPersistence": {"persistenceType": val.upper()}}
    _, body = self.api.method_put(uri, body=payload)
    return body
python
{ "resource": "" }
q18042
CloudLoadBalancerManager.get_connection_logging
train
def get_connection_logging(self, loadbalancer):
    """
    Returns the connection logging setting for the given load balancer.
    """
    uri = "/loadbalancers/%s/connectionlogging" % utils.get_id(loadbalancer)
    _, body = self.api.method_get(uri)
    return body.get("connectionLogging", {}).get("enabled", False)
python
{ "resource": "" }
q18043
CloudLoadBalancerManager.set_connection_logging
train
def set_connection_logging(self, loadbalancer, val):
    """
    Sets the connection logging for the given load balancer.
    """
    uri = "/loadbalancers/%s/connectionlogging" % utils.get_id(loadbalancer)
    # The API expects the lowercase strings "true"/"false".
    payload = {"connectionLogging": {"enabled": str(val).lower()}}
    _, body = self.api.method_put(uri, body=payload)
    return body
python
{ "resource": "" }
q18044
CloudLoadBalancerManager._get_lb
train
def _get_lb(self, lb_or_id):
    """
    Accepts either a loadbalancer or the ID of a loadbalancer, and
    returns the CloudLoadBalancer instance.
    """
    if isinstance(lb_or_id, CloudLoadBalancer):
        return lb_or_id
    # Anything else is treated as an ID and fetched from the API.
    return self.get(lb_or_id)
python
{ "resource": "" }
q18045
Node.to_dict
train
def to_dict(self):
    """Convert this Node to a dict representation for passing to the API."""
    attrs = ("address", "port", "condition", "type", "id")
    return {name: getattr(self, name) for name in attrs}
python
{ "resource": "" }
q18046
Node.update
train
def update(self):
    """
    Pushes any local changes to the object up to the actual load balancer
    node.
    """
    changes = self._diff()
    if changes:
        self.parent.update_node(self, changes)
    # No local changes means there is nothing to push.
python
{ "resource": "" }
q18047
Node.get_device
train
def get_device(self):
    """
    Returns a reference to the device that is represented by this node.
    Returns None if no such device can be determined.
    """
    addr = self.address
    # Match this node's address against each cloud server's private
    # network addresses. NOTE(review): 'in' here is a membership test on
    # whatever networks.get("private", "") returns — presumably a list of
    # address strings; confirm against the cloudservers API.
    servers = [server for server in pyrax.cloudservers.list()
            if addr in server.networks.get("private", "")]
    try:
        # If multiple servers match, the first is returned arbitrarily.
        return servers[0]
    except IndexError:
        return None
python
{ "resource": "" }
q18048
VirtualIP.to_dict
train
def to_dict(self):
    """
    Convert this VirtualIP to a dict representation for passing to the
    API.
    """
    vid = self.id
    # An existing VIP is referenced by its ID alone; a new one must carry
    # its type and IP version instead.
    return ({"id": vid} if vid
            else {"type": self.type, "ipVersion": self.ip_version})
python
{ "resource": "" }
q18049
CloudLoadBalancerClient.allowed_domains
train
def allowed_domains(self):
    """
    This property lists the allowed domains for a load balancer.

    The allowed domains are restrictions set for the allowed domain names
    used for adding load balancer nodes. In order to submit a domain name
    as an address for the load balancer node to add, the user must verify
    that the domain is valid by using the List Allowed Domains call. Once
    verified, simply supply the domain name in place of the node's
    address in the add_nodes() call.
    """
    # Cache the result; the API is only queried once per client.
    if self._allowed_domains is None:
        resp, body = self.method_get("/loadbalancers/alloweddomains")
        self._allowed_domains = [entry["allowedDomain"]["name"]
                for entry in body["allowedDomains"]]
    return self._allowed_domains
python
{ "resource": "" }
q18050
CloudLoadBalancerClient.algorithms
train
def algorithms(self):
    """
    Returns a list of available load balancing algorithms.
    """
    # Cache the result; the API is only queried once per client.
    if self._algorithms is None:
        resp, body = self.method_get("/loadbalancers/algorithms")
        self._algorithms = [alg["name"] for alg in body["algorithms"]]
    return self._algorithms
python
{ "resource": "" }
q18051
CloudLoadBalancerClient.protocols
train
def protocols(self):
    """
    Returns a list of available load balancing protocols.
    """
    # Cache the result; the API is only queried once per client.
    if self._protocols is None:
        resp, body = self.method_get("/loadbalancers/protocols")
        self._protocols = [proto["name"] for proto in body["protocols"]]
    return self._protocols
python
{ "resource": "" }
q18052
safe_int
train
def safe_int(val, allow_zero=True):
    """
    This function converts the six.moves.input values to integers. It
    handles invalid entries, and optionally forbids values of zero.

    Returns the converted integer, or False when the value is invalid
    (not an integer, or zero when allow_zero is False).
    """
    try:
        converted = int(val)
    except ValueError:
        print("Sorry, '%s' is not a valid integer." % val)
        return False
    if converted == 0 and not allow_zero:
        print("Please enter a non-zero integer.")
        return False
    return converted
python
{ "resource": "" }
q18053
from_response
train
def from_response(response, body):
    """
    Return an instance of a ClientException or subclass
    based on an httplib2 response.

    Usage::

        resp, body = http.request(...)
        if resp.status_code != 200:
            raise exception_from_response(resp, body)
    """
    # The response may be a dict (mocked/serialized) or a response object.
    if isinstance(response, dict):
        status = response.get("status_code")
    else:
        status = response.status_code
    # Map the HTTP status to a specific exception class, falling back to
    # the generic ClientException.
    cls = _code_map.get(int(status), ClientException)
    request_id = response.headers.get("x-compute-request-id")
    if body:
        message = "n/a"
        details = "n/a"
        if isinstance(body, dict):
            message = body.get("message")
            details = body.get("details")
            if message is details is None:
                # Some APIs nest the error info under a single top-level
                # key; unwrap that entry.
                error = body[next(iter(body))]
                if isinstance(error, dict):
                    message = error.get("message", None)
                    details = error.get("details", None)
                else:
                    message = error
                    details = None
        else:
            # A non-dict body is used verbatim as the message.
            message = body
        return cls(code=status, message=message, details=details,
                request_id=request_id)
    else:
        return cls(code=status, request_id=request_id)
python
{ "resource": "" }
q18054
assure_image
train
def assure_image(fnc):
    """
    Converts a image ID passed as the 'image' parameter to a image object.
    """
    @wraps(fnc)
    def _wrapped(self, img, *args, **kwargs):
        if isinstance(img, Image):
            return fnc(self, img, *args, **kwargs)
        # Must be the ID; resolve it into an Image object first.
        return fnc(self, self._manager.get(img), *args, **kwargs)
    return _wrapped
python
{ "resource": "" }
q18055
ImageManager.list_all
train
def list_all(self, name=None, visibility=None, member_status=None,
        owner=None, tag=None, status=None, size_min=None, size_max=None,
        sort_key=None, sort_dir=None):
    """
    Returns all of the images in one call, rather than in paginated
    batches.
    """
    def strip_version(uri):
        """
        The 'next' uri contains a redundant version number. We need to
        strip it to use in the method_get() call.
        """
        pos = uri.find("/images")
        return uri[pos:]

    obj_class = self.resource_class
    resp, resp_body = self.list(name=name, visibility=visibility,
            member_status=member_status, owner=owner, tag=tag,
            status=status, size_min=size_min, size_max=size_max,
            sort_key=sort_key, sort_dir=sort_dir, return_raw=True)
    data = resp_body.get(self.plural_response_key, resp_body)
    # A missing "next" key yields an empty string, which ends the loop.
    next_uri = strip_version(resp_body.get("next", ""))
    ret = [obj_class(manager=self, info=res) for res in data if res]
    while next_uri:
        resp, resp_body = self.api.method_get(next_uri)
        data = resp_body.get(self.plural_response_key, resp_body)
        next_uri = strip_version(resp_body.get("next", ""))
        ret.extend([obj_class(manager=self, info=res)
                for res in data if res])
    return ret
python
{ "resource": "" }
q18056
ImageManager.update_image_member
train
def update_image_member(self, img_id, status):
    """
    Updates the image whose ID is given with the status specified. This
    must be called by the user whose project_id is in the members for the
    image. If called by the owner of the image, an InvalidImageMember
    exception will be raised.

    Valid values for 'status' include:
        pending
        accepted
        rejected

    Any other value will result in an InvalidImageMemberStatus exception
    being raised.
    """
    if status not in ("pending", "accepted", "rejected"):
        raise exc.InvalidImageMemberStatus("The status value must be one "
                "of 'accepted', 'rejected', or 'pending'. Received: '%s'"
                % status)
    # The member entry is keyed by the caller's own project ID.
    project_id = self.api.identity.tenant_id
    uri = "/%s/%s/members/%s" % (self.uri_base, img_id, project_id)
    try:
        self.api.method_put(uri, body={"status": status})
    except exc.NotFound:
        raise exc.InvalidImageMember("The update member request could not "
                "be completed. No member request for that image was found.")
python
{ "resource": "" }
q18057
ImageMemberManager.create
train
def create(self, name, *args, **kwargs):
    """
    Wrap the default creation call so the API's 403 response is
    translated into a more meaningful UnsharableImage exception.

    Any other exception is re-raised unchanged.
    """
    try:
        return super(ImageMemberManager, self).create(name, *args, **kwargs)
    except Exception as e:
        # Bug fix: only API-client exceptions carry 'http_status'. The
        # original read e.http_status unconditionally, so any unrelated
        # exception was masked by an AttributeError. getattr() lets
        # non-API errors propagate unchanged.
        if getattr(e, "http_status", None) == 403:
            raise exc.UnsharableImage("You cannot share a public image.")
        else:
            raise
python
{ "resource": "" }
q18058
ImageTasksManager.create
train
def create(self, name, *args, **kwargs):
    """
    Standard task creation, preceded by a check that the named Swift
    container exists. get_container() raises NoSuchContainer when it
    does not, aborting the task creation.
    """
    container = kwargs.get("cont")
    if container:
        region = self.api.region_name
        swift = self.api.identity.object_store[region].client
        swift.get_container(container)
    return super(ImageTasksManager, self).create(name, *args, **kwargs)
python
{ "resource": "" }
q18059
JSONSchemaManager.images
train
def images(self):
    """
    Return the json-schema document for the 'images' entity: the
    container holding individual image entities.
    """
    resp, schema = self.api.method_get("/%s/images" % self.uri_base)
    return schema
python
{ "resource": "" }
q18060
JSONSchemaManager.image
train
def image(self):
    """
    Return the json-schema document describing a single image entity.
    """
    resp, schema = self.api.method_get("/%s/image" % self.uri_base)
    return schema
python
{ "resource": "" }
q18061
JSONSchemaManager.image_tasks
train
def image_tasks(self):
    """
    Return the json-schema document for the container of task entities.
    """
    resp, schema = self.api.method_get("/%s/tasks" % self.uri_base)
    return schema
python
{ "resource": "" }
q18062
JSONSchemaManager.image_task
train
def image_task(self):
    """
    Return the json-schema document describing a single task entity.
    """
    resp, schema = self.api.method_get("/%s/task" % self.uri_base)
    return schema
python
{ "resource": "" }
q18063
ImageClient.export_task
train
def export_task(self, img, cont):
    """
    Create a task that exports the image 'img' (an Image object or an
    image ID) to the Swift container named by 'cont'.

    Raises NoSuchContainer when the container does not exist, and
    NotFound when 'img' does not correspond to a valid image.
    """
    return self._tasks_manager.create("export", img=img, cont=cont)
python
{ "resource": "" }
q18064
ImageClient.import_task
train
def import_task(self, img, cont, img_format=None, img_name=None):
    """
    Create a task that imports 'img' from the Swift container 'cont'.

    The new image is named after the container object unless 'img_name'
    is given. The image is assumed to be in 'vhd' format unless another
    format is passed via 'img_format'.
    """
    return self._tasks_manager.create("import", img=img, cont=cont,
            img_format=img_format, img_name=img_name)
python
{ "resource": "" }
q18065
set_setting
train
def set_setting(key, val, env=None):
    """
    Change the value of 'key' in the current environment, or in the
    environment named by 'env' when one is given.
    """
    return settings.set(key, val, env=env)
python
{ "resource": "" }
q18066
create_context
train
def create_context(id_type=None, env=None, username=None, password=None,
        tenant_id=None, tenant_name=None, api_key=None, verify_ssl=None):
    """
    Return an instance of the requested identity class, or of the
    configured 'identity_class' when 'id_type' is None. Passing 'env'
    switches to that environment before the context is built.
    """
    if env:
        set_environment(env)
    return _create_identity(id_type=id_type, username=username,
            password=password, tenant_id=tenant_id,
            tenant_name=tenant_name, api_key=api_key,
            verify_ssl=verify_ssl, return_context=True)
python
{ "resource": "" }
q18067
_create_identity
train
def _create_identity(id_type=None, username=None, password=None,
        tenant_id=None, tenant_name=None, api_key=None, verify_ssl=None,
        return_context=False):
    """
    Instantiate the current identity class and, by default, bind it to
    the module-level 'identity'. With return_context=True the instance
    is returned instead, leaving the module-level name untouched.
    """
    if id_type:
        cls = _import_identity(id_type)
    else:
        cls = settings.get("identity_class")
    if not cls:
        raise exc.IdentityClassNotDefined("No identity class has "
                "been defined for the current environment.")
    if verify_ssl is None:
        # Fall back to the configured setting when not given explicitly.
        verify_ssl = get_setting("verify_ssl")
    context = cls(username=username, password=password,
            tenant_id=tenant_id, tenant_name=tenant_name, api_key=api_key,
            verify_ssl=verify_ssl)
    if return_context:
        return context
    global identity
    identity = context
python
{ "resource": "" }
q18068
_assure_identity
train
def _assure_identity(fnc):
    """
    Decorator ensuring the module-level 'identity' exists before the
    wrapped callable runs, creating it on first use.
    """
    # Fix: the original wrapper did not apply functools.wraps, so the
    # decorated function lost its __name__/__doc__ — inconsistent with
    # the sibling _require_auth decorator, which does use @wraps.
    @wraps(fnc)
    def _wrapped(*args, **kwargs):
        if identity is None:
            _create_identity()
        return fnc(*args, **kwargs)
    return _wrapped
python
{ "resource": "" }
q18069
_require_auth
train
def _require_auth(fnc):
    """Decorator that rejects calls made before authentication."""
    @wraps(fnc)
    @_assure_identity
    def _wrapped(*args, **kwargs):
        if not identity.authenticated:
            raise exc.NotAuthenticated("Authentication required before "
                    "calling '%s'." % fnc.__name__)
        return fnc(*args, **kwargs)
    return _wrapped
python
{ "resource": "" }
q18070
_safe_region
train
def _safe_region(region=None, context=None):
    """Resolve the region to use when none was explicitly specified."""
    chosen = region or settings.get("region")
    context = context or identity
    if not chosen:
        # Nothing configured: ask the identity for its default region,
        # creating the module-level identity first if necessary.
        if not context:
            _create_identity()
            context = identity
        chosen = context.get_default_region()
    if chosen:
        return chosen
    # Last resort: the first known region, or an empty string.
    return regions[0] if regions else ""
python
{ "resource": "" }
q18071
auth_with_token
train
def auth_with_token(token, tenant_id=None, tenant_name=None, region=None):
    """
    Configure the identity and available services from an existing
    valid token plus either a tenant ID or a tenant name.
    """
    global regions, services
    identity.auth_with_token(token, tenant_id=tenant_id,
            tenant_name=tenant_name)
    regions = tuple(identity.regions)
    # Iterating the services dict yields its keys.
    services = tuple(identity.services)
    connect_to_services(region=region)
python
{ "resource": "" }
q18072
set_credentials
train
def set_credentials(username, api_key=None, password=None, region=None,
        tenant_id=None, authenticate=True):
    """
    Store credentials directly and, by default, authenticate. When a
    region is given, the matching endpoint is used and that region
    becomes the default for new connections.
    """
    global regions, services
    secret = password or api_key
    region = _safe_region(region)
    tenant_id = tenant_id or settings.get("tenant_id")
    identity.set_credentials(username=username, password=secret,
            tenant_id=tenant_id, region=region, authenticate=authenticate)
    regions = tuple(identity.regions)
    services = tuple(identity.services)
    connect_to_services(region=region)
python
{ "resource": "" }
q18073
keyring_auth
train
def keyring_auth(username=None, region=None, authenticate=True):
    """
    Authenticate with a password stored in the system keyring.

    When 'username' is None, the configured keyring_username is used.
    Raises KeyringModuleNotInstalled, KeyringUsernameMissing, or
    KeyringPasswordNotFound as appropriate. When a region is given, the
    matching endpoint is used and it becomes the default region.
    """
    if not keyring:
        # Module not installed
        raise exc.KeyringModuleNotInstalled("The 'keyring' Python module is "
                "not installed on this system.")
    if username is None:
        username = settings.get("keyring_username")
    if not username:
        raise exc.KeyringUsernameMissing("No username specified for keyring "
                "authentication.")
    password = keyring.get_password("pyrax", username)
    if password is None:
        raise exc.KeyringPasswordNotFound("No password was found for the "
                "username '%s'." % username)
    set_credentials(username, password, region=region,
            authenticate=authenticate)
python
{ "resource": "" }
q18074
clear_credentials
train
def clear_credentials():
    """De-authenticate by resetting every module-level service binding."""
    client_names = ("identity", "cloudservers", "cloudfiles", "cloud_cdn",
            "cloud_loadbalancers", "cloud_databases", "cloud_blockstorage",
            "cloud_dns", "cloud_networks", "cloud_monitoring", "autoscale",
            "images", "queues")
    module_globals = globals()
    for name in client_names:
        module_globals[name] = None
    module_globals["regions"] = tuple()
    module_globals["services"] = tuple()
python
{ "resource": "" }
q18075
connect_to_services
train
def connect_to_services(region=None):
    """Establishes authenticated connections to the various cloud APIs."""
    # (name, connector) pairs, connected in the original declaration order.
    service_map = (
        ("cloudservers", connect_to_cloudservers),
        ("cloudfiles", connect_to_cloudfiles),
        ("cloud_cdn", connect_to_cloud_cdn),
        ("cloud_loadbalancers", connect_to_cloud_loadbalancers),
        ("cloud_databases", connect_to_cloud_databases),
        ("cloud_blockstorage", connect_to_cloud_blockstorage),
        ("cloud_dns", connect_to_cloud_dns),
        ("cloud_networks", connect_to_cloud_networks),
        ("cloud_monitoring", connect_to_cloud_monitoring),
        ("autoscale", connect_to_autoscale),
        ("images", connect_to_images),
        ("queues", connect_to_queues),
    )
    module_globals = globals()
    for global_name, connector in service_map:
        module_globals[global_name] = connector(region=region)
python
{ "resource": "" }
q18076
_get_service_endpoint
train
def _get_service_endpoint(context, svc, region=None, public=True):
    """
    Look up the endpoint URL for service 'svc' in the catalog, or None
    when the service is not listed.
    """
    region = _safe_region(region)
    # Fall back to the module-level identity when no context was passed.
    context = context or identity
    interface = "public" if public else "private"
    svc_obj = context.services.get(svc)
    if not svc_obj:
        return None
    endpoint = svc_obj.endpoints.get(region, {}).get(interface)
    if not endpoint:
        # Some catalogs list a single "ALL" pseudo-region instead.
        endpoint = svc_obj.endpoints.get("ALL", {}).get(interface)
    return endpoint
python
{ "resource": "" }
q18077
connect_to_cloudservers
train
def connect_to_cloudservers(region=None, context=None, verify_ssl=None,
        **kwargs):
    """
    Creates a client for working with cloud servers.

    Builds a novaclient instance for the resolved region, wires in the
    management URL and auth token from the identity context, and
    attaches several convenience methods for listing images, flavors,
    and servers. Returns None when the compute service is not available
    in the region.
    """
    # Use the passed context, or fall back to the module-level identity.
    context = context or identity
    _cs_auth_plugin.discover_auth_systems()
    id_type = get_setting("identity_type")
    # Non-keystone identity types need an explicit novaclient auth plugin.
    if id_type != "keystone":
        auth_plugin = _cs_auth_plugin.load_plugin(id_type)
    else:
        auth_plugin = None
    region = _safe_region(region, context=context)
    mgt_url = _get_service_endpoint(context, "compute", region)
    cloudservers = None
    if not mgt_url:
        # Service is not available
        return
    # novaclient expresses SSL verification inverted, as 'insecure'.
    if verify_ssl is None:
        insecure = not get_setting("verify_ssl")
    else:
        insecure = not verify_ssl
    try:
        extensions = nc.discover_extensions(_cs_max_version)
    except AttributeError:
        # Older novaclient versions lack discover_extensions.
        extensions = None
    clt_class = _cs_client.get_client_class(_cs_max_version)
    cloudservers = clt_class(context.username, context.password,
            project_id=context.tenant_id, auth_url=context.auth_endpoint,
            auth_system=id_type, region_name=region, service_type="compute",
            auth_plugin=auth_plugin, insecure=insecure,
            extensions=extensions, http_log_debug=_http_debug, **kwargs)
    # Brand the user agent, and reuse the already-obtained endpoint and
    # token so the client does not re-authenticate.
    agt = cloudservers.client.USER_AGENT
    cloudservers.client.USER_AGENT = _make_agent_name(agt)
    cloudservers.client.management_url = mgt_url
    cloudservers.client.auth_token = context.token
    cloudservers.exceptions = _cs_exceptions
    # Add some convenience methods
    cloudservers.list_images = cloudservers.images.list
    cloudservers.list_flavors = cloudservers.flavors.list
    cloudservers.list = cloudservers.servers.list

    def list_base_images():
        """
        Returns a list of all base images; excludes any images created
        by this account.
        """
        return [image for image in cloudservers.images.list()
                if not hasattr(image, "server")]

    def list_snapshots():
        """
        Returns a list of all images created by this account; in other
        words, it excludes all the base images.
        """
        return [image for image in cloudservers.images.list()
                if hasattr(image, "server")]

    def find_images_by_name(expr):
        """
        Returns a list of images whose name contains the specified
        expression. The value passed is treated as a regular expression,
        allowing for more specific searches than simple wildcards. The
        matching is done in a case-insensitive manner.
        """
        return [image for image in cloudservers.images.list()
                if re.search(expr, image.name, re.I)]

    cloudservers.list_base_images = list_base_images
    cloudservers.list_snapshots = list_snapshots
    cloudservers.find_images_by_name = find_images_by_name
    # NOTE(review): this binds the module-level identity, not the
    # 'context' used above — confirm that is intentional when a custom
    # context is passed.
    cloudservers.identity = identity
    return cloudservers
python
{ "resource": "" }
q18078
connect_to_cloud_cdn
train
def connect_to_cloud_cdn(region=None):
    """Creates a client for working with Rackspace Cloud CDN."""
    global default_region
    # Cloud CDN is only deployed in DFW and LON, which does not mesh
    # cleanly with the identity service catalog: US-auth regions (DFW,
    # ORD, IAD, SYD, HKG) must use the DFW endpoint, and LON must use
    # the LON endpoint. Map the requested region first, then the
    # default region, and finally fall back to whatever was passed in.
    us_auth_regions = ("DFW", "IAD", "ORD", "SYD", "HKG")

    def _cdn_home(rgn):
        if rgn in us_auth_regions:
            return "DFW"
        if rgn == "LON":
            return "LON"
        return None

    target = _cdn_home(region) or _cdn_home(default_region) or region
    return _create_client(ep_name="cdn", region=target)
python
{ "resource": "" }
q18079
connect_to_images
train
def connect_to_images(region=None, public=True):
    """Creates a client for working with Images."""
    return _create_client(ep_name="image", region=region, public=public)
python
{ "resource": "" }
q18080
connect_to_queues
train
def connect_to_queues(region=None, public=True):
    """Creates a client for working with Queues."""
    return _create_client(ep_name="queues", region=region, public=public)
python
{ "resource": "" }
q18081
Settings.get
train
def get(self, key, env=None):
    """
    Return the config setting 'key' for the given environment, or for
    the current environment when 'env' is None. Unknown keys or
    environments yield None. When no stored value exists, the matching
    OS environment variable (if any) is consulted.
    """
    env = self.environment if env is None else env
    try:
        val = self._settings[env][key]
    except KeyError:
        val = None
    if val is not None:
        return val
    # Not stored; see if it's set in the OS environment.
    if key == "identity_class":
        # 'identity_class' is derived from the identity_type env var.
        env_var = self.env_dct.get("identity_type")
        ityp = os.environ.get(env_var)
        if ityp:
            return _import_identity(ityp)
        return val
    env_var = self.env_dct.get(key)
    if env_var is not None:
        val = os.environ.get(env_var)
    return val
python
{ "resource": "" }
q18082
Settings.set
train
def set(self, key, val, env=None):
    """
    Changes the value for the setting specified by 'key' to the new
    value. By default this will change the current environment, but
    you can change values in other environments by passing the name of
    that environment as the 'env' parameter.

    Raises EnvironmentNotFound for an unknown 'env' and InvalidSetting
    for a key not already defined in that environment.
    """
    if env is None:
        env = self.environment
    else:
        if env not in self._settings:
            raise exc.EnvironmentNotFound("There is no environment named "
                    "'%s'." % env)
    dct = self._settings[env]
    if key not in dct:
        raise exc.InvalidSetting("The setting '%s' is not defined." % key)
    dct[key] = val
    if key == "identity_type":
        # If setting the identity_type, also change the identity_class.
        dct["identity_class"] = _import_identity(val)
    elif key == "region":
        # Keep the live identity in sync, but only when one exists.
        if not identity:
            return
        current = identity.region
        if current == val:
            return
        if "LON" in (current, val):
            # This is an outlier, as it has a separate auth.
            # NOTE(review): only moves between LON and other regions are
            # propagated to the identity; other region changes leave
            # identity.region untouched — confirm this is intentional.
            identity.region = val
    elif key == "verify_ssl":
        if not identity:
            return
        identity.verify_ssl = val
python
{ "resource": "" }
q18083
Settings.read_config
train
def read_config(self, config_file):
    """
    Parses the specified configuration file and stores the values.
    Raises an InvalidConfigurationFile exception if the file is not
    well-formed.

    Each section becomes an environment; a section literally named
    "settings" becomes the "default" environment, otherwise the first
    section encountered is also aliased as "default".
    """
    cfg = ConfigParser.SafeConfigParser()
    try:
        cfg.read(config_file)
    except ConfigParser.MissingSectionHeaderError as e:
        # The file exists, but doesn't have the correct format.
        raise exc.InvalidConfigurationFile(e)

    def safe_get(section, option, default=None):
        # Missing sections/options fall back to 'default' instead of raising.
        try:
            return cfg.get(section, option)
        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
            return default

    # A common mistake is including credentials in the config file. If any
    # values are found, issue a warning so that the developer can correct
    # this problem.
    creds_found = False
    for section in cfg.sections():
        if section == "settings":
            section_name = "default"
            self._default_set = True
        else:
            section_name = section
        # Check for included credentials
        for key in ("username", "password", "api_key"):
            if creds_found:
                break
            if safe_get(section, key):
                creds_found = True
        dct = self._settings[section_name] = {}
        dct["region"] = safe_get(section, "region", default_region)
        ityp = safe_get(section, "identity_type")
        if ityp:
            dct["identity_type"] = _id_type(ityp)
            dct["identity_class"] = _import_identity(ityp)
        # Handle both the old and new names for this setting.
        debug = safe_get(section, "debug")
        if debug is None:
            debug = safe_get(section, "http_debug", "False")
        # Boolean-ish options are stored as the string comparison result.
        dct["http_debug"] = debug == "True"
        verify_ssl = safe_get(section, "verify_ssl", "True")
        dct["verify_ssl"] = verify_ssl == "True"
        dct["keyring_username"] = safe_get(section, "keyring_username")
        dct["encoding"] = safe_get(section, "encoding", default_encoding)
        dct["auth_endpoint"] = safe_get(section, "auth_endpoint")
        dct["tenant_name"] = safe_get(section, "tenant_name")
        dct["tenant_id"] = safe_get(section, "tenant_id")
        use_servicenet = safe_get(section, "use_servicenet", "False")
        dct["use_servicenet"] = use_servicenet == "True"
        app_agent = safe_get(section, "custom_user_agent")
        if app_agent:
            # Customize the user-agent string with the app name.
            dct["user_agent"] = "%s %s" % (app_agent, USER_AGENT)
        else:
            dct["user_agent"] = USER_AGENT
        # If this is the first section, make it the default
        if not self._default_set:
            self._settings["default"] = self._settings[section]
            self._default_set = True
    if creds_found:
        warnings.warn("Login credentials were detected in your .pyrax.cfg "
                "file. These have been ignored, but you should remove "
                "them and either place them in a credential file, or "
                "consider using another means of authentication. More "
                "information on the use of credential files can be found "
                "in the 'docs/getting_started.md' document.")
python
{ "resource": "" }
q18084
CloudDNSRecord.update
train
def update(self, data=None, priority=None, ttl=None, comment=None):
    """Modify this record, delegating to the manager."""
    return self.manager.update_record(self.domain_id, self, data=data,
            priority=priority, ttl=ttl, comment=comment)
python
{ "resource": "" }
q18085
CloudDNSDomain.delete
train
def delete(self, delete_subdomains=False):
    """
    Remove this domain and all of its resource records. Any subdomains
    become root domains, unless delete_subdomains=True, in which case
    they are deleted as well.
    """
    self.manager.delete(self, delete_subdomains=delete_subdomains)
python
{ "resource": "" }
q18086
CloudDNSDomain.list_subdomains
train
def list_subdomains(self, limit=None, offset=None):
    """Return all subdomains of this domain, via the manager."""
    return self.manager.list_subdomains(self, limit=limit, offset=offset)
python
{ "resource": "" }
q18087
CloudDNSDomain.list_records
train
def list_records(self, limit=None, offset=None):
    """Return all records configured for this domain, via the manager."""
    return self.manager.list_records(self, limit=limit, offset=offset)
python
{ "resource": "" }
q18088
CloudDNSDomain.search_records
train
def search_records(self, record_type, name=None, data=None):
    """
    Return this domain's records that match the given type and,
    optionally, name and data criteria.
    """
    return self.manager.search_records(self, record_type=record_type,
            name=name, data=data)
python
{ "resource": "" }
q18089
CloudDNSDomain.update_record
train
def update_record(self, record, data=None, priority=None, ttl=None,
        comment=None):
    """Modify an existing record on this domain, via the manager."""
    return self.manager.update_record(self, record, data=data,
            priority=priority, ttl=ttl, comment=comment)
python
{ "resource": "" }
q18090
CloudDNSManager._create_body
train
def _create_body(self, name, emailAddress, ttl=3600, comment=None, subdomains=None, records=None): """ Creates the appropriate dict for creating a new domain. """ if subdomains is None: subdomains = [] if records is None: records = [] body = {"domains": [{ "name": name, "emailAddress": emailAddress, "ttl": ttl, "comment": comment, "subdomains": { "domains": subdomains }, "recordsList": { "records": records }, }]} return body
python
{ "resource": "" }
q18091
CloudDNSManager._reset_paging
train
def _reset_paging(self, service, body=None):
    """
    Resets the internal attributes when there is no current paging
    request.

    For service == "all", every service's paging state is cleared and
    'body' is ignored. Otherwise the named service's state is cleared
    and then, when 'body' is given, repopulated from its 'totalEntries'
    value and its 'next'/'previous' links.
    """
    if service == "all":
        for svc in self._paging.keys():
            svc_dct = self._paging[svc]
            svc_dct["next_uri"] = svc_dct["prev_uri"] = None
            svc_dct["total_entries"] = None
        return
    svc_dct = self._paging[service]
    svc_dct["next_uri"] = svc_dct["prev_uri"] = None
    svc_dct["total_entries"] = None
    if not body:
        return
    svc_dct["total_entries"] = body.get("totalEntries")
    links = body.get("links")
    uri_base = self.uri_base
    if links:
        for link in links:
            href = link["href"]
            pos = href.index(uri_base)
            # Keep from one character before uri_base (presumably the
            # leading "/") through the end of the href.
            # NOTE(review): .index() raises ValueError when uri_base is
            # absent from the href — confirm the API always includes it.
            page_uri = href[pos - 1:]
            if link["rel"] == "next":
                svc_dct["next_uri"] = page_uri
            elif link["rel"] == "previous":
                svc_dct["prev_uri"] = page_uri
python
{ "resource": "" }
q18092
CloudDNSManager.list
train
def list(self, limit=None, offset=None):
    """Return all domains, or a single page when limit/offset are given."""
    qs = self._get_pagination_qs(limit, offset)
    return self._list("/%s%s" % (self.uri_base, qs))
python
{ "resource": "" }
q18093
CloudDNSManager.list_previous_page
train
def list_previous_page(self):
    """
    Return the previous page of domain results, using the same limit.
    Raises NoMoreResults when there is no previous page.
    """
    prev_uri = self._paging.get("domain", {}).get("prev_uri")
    if prev_uri is None:
        raise exc.NoMoreResults("There are no previous pages of domains "
                "to list.")
    return self._list(prev_uri)
python
{ "resource": "" }
q18094
CloudDNSManager.list_next_page
train
def list_next_page(self):
    """
    Return the next page of domain results, using the same limit.
    Raises NoMoreResults when there are no further pages.
    """
    next_uri = self._paging.get("domain", {}).get("next_uri")
    if next_uri is None:
        raise exc.NoMoreResults("There are no more pages of domains "
                "to list.")
    return self._list(next_uri)
python
{ "resource": "" }
q18095
CloudDNSManager._retry_get
train
def _retry_get(self, uri):
    """
    Issue a GET to the Cloud DNS API, retrying when the response body
    comes back empty, which this service does intermittently.
    """
    for _ in six.moves.range(DEFAULT_RETRY):
        resp, resp_body = self.api.method_get(uri)
        if resp_body:
            return resp, resp_body
    # Tried too many times
    raise exc.ServiceResponseFailure("The Cloud DNS service failed to "
            "respond to the request.")
python
{ "resource": "" }
q18096
CloudDNSManager._process_async_error
train
def _process_async_error(self, resp_body, error_class): """ The DNS API does not return a consistent format for their error messages. This abstracts out the differences in order to present a single unified message in the exception to be raised. """ def _fmt_error(err): # Remove the cumbersome Java-esque message details = err.get("details", "").replace("\n", " ") if not details: details = err.get("message", "") return "%s (%s)" % (details, err.get("code", "")) error = resp_body.get("error", "") if "failedItems" in error: # Multi-error response faults = error.get("failedItems", {}).get("faults", []) msgs = [_fmt_error(fault) for fault in faults] msg = "\n".join(msgs) else: msg = _fmt_error(error) raise error_class(msg)
python
{ "resource": "" }
q18097
CloudDNSManager.delete
train
def delete(self, domain, delete_subdomains=False):
    """
    Delete the given domain and all of its resource records. Its
    subdomains become root domains, unless delete_subdomains=True,
    in which case they are deleted too.
    """
    uri = "/%s/%s" % (self.uri_base, utils.get_id(domain))
    if delete_subdomains:
        uri = "%s?deleteSubdomains=true" % uri
    self._async_call(uri, method="DELETE",
            error_class=exc.DomainDeletionFailed, has_response=False)
python
{ "resource": "" }
q18098
CloudDNSManager.list_subdomains
train
def list_subdomains(self, domain, limit=None, offset=None):
    """
    Return all subdomains of the specified domain.
    """
    # The official endpoint ("/domains/<id>/subdomains") is horribly
    # slow, so query by name instead and filter by the domain's ID.
    uri = "/domains?name=%s" % domain.name
    page_qs = self._get_pagination_qs(limit, offset)
    if page_qs:
        # Drop the query string's leading separator and append with "&",
        # since the URI already carries a "?".
        uri = "%s&%s" % (uri, page_qs[1:])
    return self._list_subdomains(uri, domain.id)
python
{ "resource": "" }
q18099
CloudDNSManager.list_subdomains_previous_page
train
def list_subdomains_previous_page(self):
    """
    Return the previous page of subdomain results, using the same
    limit. Raises NoMoreResults when there is no previous page.
    """
    prev_uri = self._paging.get("subdomain", {}).get("prev_uri")
    if prev_uri is None:
        raise exc.NoMoreResults("There are no previous pages of subdomains "
                "to list.")
    return self._list_subdomains(prev_uri)