code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
abd = attemptsBeforeDeactivation
return self.manager.add_health_monitor(self, type=type, delay=delay,
timeout=timeout, attemptsBeforeDeactivation=abd,
path=path, statusRegex=statusRegex, bodyRegex=bodyRegex,
hostHeader=hostHeader) | def add_health_monitor(self, type, delay=10, timeout=10,
attemptsBeforeDeactivation=3, path="/", statusRegex=None,
bodyRegex=None, hostHeader=None) | Adds a health monitor to the load balancer. If a monitor already
exists, it is updated with the supplied settings. | 2.697088 | 2.830073 | 0.95301 |
if not any((maxConnectionRate, maxConnections, minConnections,
rateInterval)):
# Pointless call
return
return self.manager.add_connection_throttle(self,
maxConnectionRate=maxConnectionRate, maxConnections=maxConnections,
minConnections=minConnections, rateInterval=rateInterval) | def add_connection_throttle(self, maxConnectionRate=None,
maxConnections=None, minConnections=None, rateInterval=None) | Updates the connection throttling information for the load balancer with
the supplied values. At least one of the parameters must be supplied. | 2.758808 | 3.09677 | 0.890866 |
return self.manager.add_ssl_termination(self, securePort=securePort,
privatekey=privatekey, certificate=certificate,
intermediateCertificate=intermediateCertificate,
enabled=enabled, secureTrafficOnly=secureTrafficOnly) | def add_ssl_termination(self, securePort, privatekey, certificate,
intermediateCertificate=None, enabled=True,
secureTrafficOnly=False) | Adds SSL termination information to the load balancer. If SSL
termination has already been configured, it is updated with the
supplied settings. | 1.912913 | 2.468706 | 0.774865 |
return self.manager.update_ssl_termination(self, securePort=securePort,
enabled=enabled, secureTrafficOnly=secureTrafficOnly) | def update_ssl_termination(self, securePort=None, enabled=None,
secureTrafficOnly=None) | Updates existing SSL termination information for the load balancer
without affecting the existing certificates/keys. | 2.47866 | 3.89641 | 0.636139 |
return self.manager.set_metadata(self, metadata, node=node) | def set_metadata_for_node(self, node, metadata) | Sets the metadata for the specified node to the supplied dictionary
of values. Any existing metadata is cleared. | 8.129346 | 9.363336 | 0.86821 |
return self.manager.update_metadata(self, metadata, node=node) | def update_metadata_for_node(self, node, metadata) | Updates the existing metadata for the specified node with
the supplied dictionary. | 8.715071 | 10.030764 | 0.868834 |
return self.manager.delete_metadata(self, keys=keys, node=node) | def delete_metadata_for_node(self, node, keys=None) | Deletes metadata items specified by the 'keys' parameter for
the specified node. If no value for 'keys' is provided, all
metadata is deleted. | 6.203387 | 7.758278 | 0.799583 |
body = {}
if name is not None:
body["name"] = name
if algorithm is not None:
body["algorithm"] = algorithm
if protocol is not None:
body["protocol"] = protocol
if halfClosed is not None:
body["halfClosed"] = halfClosed
if port is not None:
body["port"] = port
if timeout is not None:
body["timeout"] = timeout
if httpsRedirect is not None:
body["httpsRedirect"] = httpsRedirect
if not body:
# Nothing passed
return
body = {"loadBalancer": body}
uri = "/loadbalancers/%s" % utils.get_id(lb)
try:
resp, resp_body = self.api.method_put(uri, body=body)
except exc.ClientException as e:
message = e.message
details = e.details
if message and details:
errmsg = "%s - %s" % (message, details)
else:
errmsg = message
raise exc.InvalidLoadBalancerParameters(errmsg)
return resp, resp_body | def update(self, lb, name=None, algorithm=None, protocol=None,
halfClosed=None, port=None, timeout=None, httpsRedirect=None) | Provides a way to modify the following attributes of a load balancer:
- name
- algorithm
- protocol
- halfClosed
- port
- timeout
- httpsRedirect | 1.976256 | 2.061518 | 0.958641 |
required = (virtual_ips, port, protocol)
if not all(required):
raise exc.MissingLoadBalancerParameters("Load Balancer creation "
"requires at least one virtual IP, a protocol, and a port.")
nodes = utils.coerce_to_list(nodes)
virtual_ips = utils.coerce_to_list(virtual_ips)
bad_conditions = [node.condition for node in nodes
if node.condition.upper() not in ("ENABLED", "DISABLED")]
if bad_conditions:
raise exc.InvalidNodeCondition("Nodes for new load balancer must be "
"created in either 'ENABLED' or 'DISABLED' condition; "
"received the following invalid conditions: %s" %
", ".join(set(bad_conditions)))
node_dicts = [nd.to_dict() for nd in nodes]
vip_dicts = [vip.to_dict() for vip in virtual_ips]
body = {"loadBalancer": {
"name": name,
"port": port,
"protocol": protocol,
"nodes": node_dicts,
"virtualIps": vip_dicts,
"algorithm": algorithm or "RANDOM",
"halfClosed": halfClosed,
"accessList": accessList,
"connectionLogging": connectionLogging,
"connectionThrottle": connectionThrottle,
"healthMonitor": healthMonitor,
"metadata": metadata,
"timeout": timeout,
"sessionPersistence": sessionPersistence,
"httpsRedirect": httpsRedirect,
}}
return body | def _create_body(self, name, port=None, protocol=None, nodes=None,
virtual_ips=None, algorithm=None, halfClosed=None, accessList=None,
connectionLogging=None, connectionThrottle=None, healthMonitor=None,
metadata=None, timeout=None, sessionPersistence=None,
httpsRedirect=None) | Used to create the dict required to create a load balancer instance. | 2.449874 | 2.421437 | 1.011744 |
if not isinstance(nodes, (list, tuple)):
nodes = [nodes]
node_dicts = [nd.to_dict() for nd in nodes]
resp, body = self.api.method_post("/loadbalancers/%s/nodes" % lb.id,
body={"nodes": node_dicts})
return resp, body | def add_nodes(self, lb, nodes) | Adds the list of nodes to the specified load balancer. | 3.209046 | 2.851021 | 1.125578 |
lb = node.parent
if not lb:
raise exc.UnattachedNode("No parent Load Balancer for this node "
"could be determined.")
resp, body = self.api.method_delete("/loadbalancers/%s/nodes/%s" %
(lb.id, node.id))
return resp, body | def delete_node(self, loadbalancer, node) | Removes the node from its load balancer. | 5.268571 | 4.834027 | 1.089893 |
lb = node.parent
if not lb:
raise exc.UnattachedNode("No parent Load Balancer for this node "
"could be determined.")
if diff is None:
diff = node._diff()
req_body = {"node": diff}
resp, body = self.api.method_put("/loadbalancers/%s/nodes/%s" %
(lb.id, node.id), body=req_body)
return resp, body | def update_node(self, node, diff=None) | Updates the node's attributes. | 4.743977 | 4.500817 | 1.054026 |
resp, body = self.api.method_post("/loadbalancers/%s/virtualips" % lb.id,
body=vip.to_dict())
return resp, body | def add_virtualip(self, lb, vip) | Adds the VirtualIP to the specified load balancer. | 3.811466 | 3.641682 | 1.046622 |
lb = vip.parent
if not lb:
raise exc.UnattachedVirtualIP("No parent Load Balancer for this "
"VirtualIP could be determined.")
resp, body = self.api.method_delete("/loadbalancers/%s/virtualips/%s" %
(lb.id, vip.id))
return resp, body | def delete_virtualip(self, loadbalancer, vip) | Deletes the VirtualIP from its load balancer. | 4.830808 | 4.577372 | 1.055367 |
req_body = {"accessList": access_list}
uri = "/loadbalancers/%s/accesslist" % utils.get_id(loadbalancer)
resp, body = self.api.method_post(uri, body=req_body)
return body | def add_access_list(self, loadbalancer, access_list) | Adds the access list provided to the load balancer.
The 'access_list' should be a list of dicts in the following format:
[{"address": "192.0.43.10", "type": "DENY"},
{"address": "192.0.43.11", "type": "ALLOW"},
...
{"address": "192.0.43.99", "type": "DENY"},
]
If no access list exists, it is created. If an access list
already exists, it is updated with the provided list. | 3.114459 | 3.721054 | 0.836983 |
uri = "/loadbalancers/%s/accesslist" % utils.get_id(loadbalancer)
resp, body = self.api.method_delete(uri)
return body | def delete_access_list(self, loadbalancer) | Removes the access list from this load balancer. | 3.659426 | 3.178061 | 1.151465 |
if not isinstance(item_ids, (list, tuple)):
item_ids = [item_ids]
valid_ids = [itm["id"] for itm in self.get_access_list(loadbalancer)]
bad_ids = [str(itm) for itm in item_ids if itm not in valid_ids]
if bad_ids:
raise exc.AccessListIDNotFound("The following ID(s) are not valid "
"Access List items: %s" % ", ".join(bad_ids))
items = "&".join(["id=%s" % item_id for item_id in item_ids])
uri = "/loadbalancers/%s/accesslist?%s" % (
utils.get_id(loadbalancer), items)
# TODO: add the item ids
resp, body = self.api.method_delete(uri)
return body | def delete_access_list_items(self, loadbalancer, item_ids) | Removes the item(s) from the load balancer's access list
that match the provided IDs. 'item_ids' should be one or
more access list item IDs. | 2.679101 | 2.649113 | 1.01132 |
uri = "/loadbalancers/%s/healthmonitor" % utils.get_id(loadbalancer)
resp, body = self.api.method_get(uri)
return body.get("healthMonitor", {}) | def get_health_monitor(self, loadbalancer) | Returns a dict representing the health monitor for the load
balancer. If no monitor has been configured, returns an
empty dict. | 3.511329 | 2.986195 | 1.175854 |
uri = "/loadbalancers/%s/healthmonitor" % utils.get_id(loadbalancer)
req_body = {"healthMonitor": {
"type": type,
"delay": delay,
"timeout": timeout,
"attemptsBeforeDeactivation": attemptsBeforeDeactivation,
}}
uptype = type.upper()
if uptype.startswith("HTTP"):
lb = self._get_lb(loadbalancer)
if uptype != lb.protocol:
raise exc.ProtocolMismatch("Cannot set the Health Monitor type "
"to '%s' when the Load Balancer's protocol is '%s'." %
(type, lb.protocol))
if not all((path, statusRegex, bodyRegex)):
raise exc.MissingHealthMonitorSettings("When creating an HTTP(S) "
"monitor, you must provide the 'path', 'statusRegex' and "
"'bodyRegex' parameters.")
body_hm = req_body["healthMonitor"]
body_hm["path"] = path
body_hm["statusRegex"] = statusRegex
body_hm["bodyRegex"] = bodyRegex
if hostHeader:
body_hm["hostHeader"] = hostHeader
resp, body = self.api.method_put(uri, body=req_body)
return body | def add_health_monitor(self, loadbalancer, type, delay=10, timeout=10,
attemptsBeforeDeactivation=3, path="/", statusRegex=None,
bodyRegex=None, hostHeader=None) | Adds a health monitor to the load balancer. If a monitor already
exists, it is updated with the supplied settings. | 2.6888 | 2.646042 | 1.016159 |
settings = {}
if maxConnectionRate:
settings["maxConnectionRate"] = maxConnectionRate
if maxConnections:
settings["maxConnections"] = maxConnections
if minConnections:
settings["minConnections"] = minConnections
if rateInterval:
settings["rateInterval"] = rateInterval
req_body = {"connectionThrottle": settings}
uri = "/loadbalancers/%s/connectionthrottle" % utils.get_id(loadbalancer)
resp, body = self.api.method_put(uri, body=req_body)
return body | def add_connection_throttle(self, loadbalancer, maxConnectionRate=None,
maxConnections=None, minConnections=None, rateInterval=None) | Creates or updates the connection throttling information for the load
balancer. When first creating the connection throttle, all 4 parameters
must be supplied. When updating an existing connection throttle, at
least one of the parameters must be supplied. | 1.862178 | 1.905371 | 0.977331 |
uri = "/loadbalancers/%s/ssltermination" % utils.get_id(loadbalancer)
try:
resp, body = self.api.method_get(uri)
except exc.NotFound:
# For some reason, instead of returning an empty dict like the
# other API GET calls, this raises a 404.
return {}
return body.get("sslTermination", {}) | def get_ssl_termination(self, loadbalancer) | Returns a dict representing the SSL termination configuration
for the load balancer. If SSL termination has not been configured,
returns an empty dict. | 4.499982 | 3.975575 | 1.131907 |
uri = "/loadbalancers/%s/ssltermination" % utils.get_id(loadbalancer)
req_body = {"sslTermination": {
"certificate": certificate,
"enabled": enabled,
"secureTrafficOnly": secureTrafficOnly,
"privatekey": privatekey,
"intermediateCertificate": intermediateCertificate,
"securePort": securePort,
}}
resp, body = self.api.method_put(uri, body=req_body)
return body | def add_ssl_termination(self, loadbalancer, securePort, privatekey, certificate,
intermediateCertificate, enabled=True, secureTrafficOnly=False) | Adds SSL termination information to the load balancer. If SSL termination
has already been configured, it is updated with the supplied settings. | 2.144116 | 2.272609 | 0.94346 |
ssl_info = self.get_ssl_termination(loadbalancer)
if not ssl_info:
raise exc.NoSSLTerminationConfiguration("You must configure SSL "
"termination on this load balancer before attempting "
"to update it.")
if securePort is None:
securePort = ssl_info["securePort"]
if enabled is None:
enabled = ssl_info["enabled"]
if secureTrafficOnly is None:
secureTrafficOnly = ssl_info["secureTrafficOnly"]
uri = "/loadbalancers/%s/ssltermination" % utils.get_id(loadbalancer)
req_body = {"sslTermination": {
"enabled": enabled,
"secureTrafficOnly": secureTrafficOnly,
"securePort": securePort,
}}
resp, body = self.api.method_put(uri, body=req_body)
return body | def update_ssl_termination(self, loadbalancer, securePort=None, enabled=None,
secureTrafficOnly=None) | Updates existing SSL termination information for the load balancer
without affecting the existing certificates/keys. | 2.066998 | 2.138164 | 0.966716 |
if node:
uri = "/loadbalancers/%s/nodes/%s/metadata" % (
utils.get_id(loadbalancer), utils.get_id(node))
else:
uri = "/loadbalancers/%s/metadata" % utils.get_id(loadbalancer)
resp, body = self.api.method_get(uri)
meta = body.get("metadata", [])
if raw:
return meta
ret = dict([(itm["key"], itm["value"]) for itm in meta])
return ret | def get_metadata(self, loadbalancer, node=None, raw=False) | Returns the current metadata for the load balancer. If 'node' is
provided, returns the current metadata for that node. | 2.081238 | 2.195196 | 0.948088 |
# Delete any existing metadata
self.delete_metadata(loadbalancer, node=node)
# Convert the metadata dict into the list format
metadata_list = [{"key": key, "value": val}
for key, val in metadata.items()]
if node:
uri = "/loadbalancers/%s/nodes/%s/metadata" % (
utils.get_id(loadbalancer), utils.get_id(node))
else:
uri = "/loadbalancers/%s/metadata" % utils.get_id(loadbalancer)
req_body = {"metadata": metadata_list}
resp, body = self.api.method_post(uri, body=req_body)
return body | def set_metadata(self, loadbalancer, metadata, node=None) | Sets the metadata for the load balancer to the supplied dictionary
of values. Any existing metadata is cleared. If 'node' is provided,
the metadata for that node is set instead of for the load balancer. | 2.211903 | 2.243082 | 0.9861 |
# Get the existing metadata
md = self.get_metadata(loadbalancer, raw=True)
id_lookup = dict([(itm["key"], itm["id"]) for itm in md])
metadata_list = []
# Updates must be done individually
for key, val in metadata.items():
try:
meta_id = id_lookup[key]
if node:
uri = "/loadbalancers/%s/nodes/%s/metadata/%s" % (
utils.get_id(loadbalancer), utils.get_id(node),
meta_id)
else:
uri = "/loadbalancers/%s/metadata" % utils.get_id(loadbalancer)
req_body = {"meta": {"value": val}}
resp, body = self.api.method_put(uri, body=req_body)
except KeyError:
# Not an existing key; add to metadata_list
metadata_list.append({"key": key, "value": val})
if metadata_list:
# New items; POST them
if node:
uri = "/loadbalancers/%s/nodes/%s/metadata" % (
utils.get_id(loadbalancer), utils.get_id(node))
else:
uri = "/loadbalancers/%s/metadata" % utils.get_id(loadbalancer)
req_body = {"metadata": metadata_list}
resp, body = self.api.method_post(uri, body=req_body) | def update_metadata(self, loadbalancer, metadata, node=None) | Updates the existing metadata with the supplied dictionary. If
'node' is supplied, the metadata for that node is updated instead
of for the load balancer. | 2.138663 | 2.1591 | 0.990534 |
if keys and not isinstance(keys, (list, tuple)):
keys = [keys]
md = self.get_metadata(loadbalancer, node=node, raw=True)
if keys:
md = [dct for dct in md if dct["key"] in keys]
if not md:
# Nothing to do; log it? Raise an error?
return
id_list = "&".join(["id=%s" % itm["id"] for itm in md])
if node:
uri = "/loadbalancers/%s/nodes/%s/metadata?%s" % (
utils.get_id(loadbalancer), utils.get_id(node), id_list)
else:
uri = "/loadbalancers/%s/metadata?%s" % (
utils.get_id(loadbalancer), id_list)
resp, body = self.api.method_delete(uri)
return body | def delete_metadata(self, loadbalancer, keys=None, node=None) | Deletes metadata items specified by the 'keys' parameter. If no value
for 'keys' is provided, all metadata is deleted. If 'node' is supplied,
the metadata for that node is deleted instead of the load balancer. | 2.437973 | 2.568366 | 0.949231 |
uri = "/loadbalancers/%s/errorpage" % utils.get_id(loadbalancer)
resp, body = self.api.method_get(uri)
return body | def get_error_page(self, loadbalancer) | Load Balancers all have a default error page that is shown to
an end user who is attempting to access a load balancer node
that is offline/unavailable. | 3.903693 | 3.440177 | 1.134736 |
uri = "/loadbalancers/%s/errorpage" % utils.get_id(loadbalancer)
req_body = {"errorpage": {"content": html}}
resp, body = self.api.method_put(uri, body=req_body)
return body | def set_error_page(self, loadbalancer, html) | A single custom error page may be added per account load balancer
with an HTTP protocol. Page updates will override existing content.
If a custom error page is deleted, or the load balancer is changed
to a non-HTTP protocol, the default error page will be restored. | 3.175364 | 3.254806 | 0.975592 |
if start is end is None:
period = None
else:
parts = []
startStr = utils.iso_time_string(start)
if startStr:
parts.append("startTime=%s" % startStr)
endStr = utils.iso_time_string(end)
if endStr:
parts.append("endTime=%s" % endStr)
period = "&".join(parts).strip("&")
if loadbalancer is None:
uri = "/loadbalancers/usage"
else:
uri = "/loadbalancers/%s/usage" % utils.get_id(loadbalancer)
if period:
uri = "%s?%s" % (uri, period)
resp, body = self.api.method_get(uri)
return body | def get_usage(self, loadbalancer=None, start=None, end=None) | Return the load balancer usage records for this account. If 'loadbalancer'
is None, records for all load balancers are returned. You may optionally
include a start datetime or an end datetime, or both, which will limit
the records to those on or after the start time, and those before or on the
end time. These times should be Python datetime.datetime objects, Python
datetime.date objects, or strings in the format: "YYYY-MM-DD HH:MM:SS" or
"YYYY-MM-DD". | 2.276685 | 2.251903 | 1.011005 |
uri = "/loadbalancers/%s/sessionpersistence" % utils.get_id(loadbalancer)
resp, body = self.api.method_get(uri)
ret = body["sessionPersistence"].get("persistenceType", "")
return ret | def get_session_persistence(self, loadbalancer) | Returns the session persistence setting for the given load balancer. | 4.103336 | 3.640087 | 1.127263 |
val = val.upper()
uri = "/loadbalancers/%s/sessionpersistence" % utils.get_id(loadbalancer)
req_body = {"sessionPersistence": {
"persistenceType": val,
}}
resp, body = self.api.method_put(uri, body=req_body)
return body | def set_session_persistence(self, loadbalancer, val) | Sets the session persistence for the given load balancer. | 3.380502 | 3.47056 | 0.974051 |
uri = "/loadbalancers/%s/connectionlogging" % utils.get_id(loadbalancer)
resp, body = self.api.method_get(uri)
ret = body.get("connectionLogging", {}).get("enabled", False)
return ret | def get_connection_logging(self, loadbalancer) | Returns the connection logging setting for the given load balancer. | 3.575252 | 2.994381 | 1.193987 |
uri = "/loadbalancers/%s/connectionlogging" % utils.get_id(loadbalancer)
val = str(val).lower()
req_body = {"connectionLogging": {
"enabled": val,
}}
resp, body = self.api.method_put(uri, body=req_body)
return body | def set_connection_logging(self, loadbalancer, val) | Sets the connection logging for the given load balancer. | 3.422552 | 3.422505 | 1.000014 |
if isinstance(lb_or_id, CloudLoadBalancer):
ret = lb_or_id
else:
ret = self.get(lb_or_id)
return ret | def _get_lb(self, lb_or_id) | Accepts either a loadbalancer or the ID of a loadbalancer, and returns
the CloudLoadBalancer instance. | 2.682346 | 2.191664 | 1.223886 |
return {"address": self.address,
"port": self.port,
"condition": self.condition,
"type": self.type,
"id": self.id,
} | def to_dict(self) | Convert this Node to a dict representation for passing to the API. | 3.747339 | 3.162363 | 1.184981 |
diff = self._diff()
if not diff:
# Nothing to do!
return
self.parent.update_node(self, diff) | def update(self) | Pushes any local changes to the object up to the actual load
balancer node. | 7.52695 | 6.277306 | 1.199073 |
addr = self.address
servers = [server for server in pyrax.cloudservers.list()
if addr in server.networks.get("private", "")]
try:
return servers[0]
except IndexError:
return None | def get_device(self) | Returns a reference to the device that is represented by this node.
Returns None if no such device can be determined. | 6.698685 | 6.795805 | 0.985709 |
if self.id:
return {"id": self.id}
return {"type": self.type, "ipVersion": self.ip_version} | def to_dict(self) | Convert this VirtualIP to a dict representation for passing
to the API. | 5.525107 | 3.680228 | 1.501295 |
self._manager = CloudLoadBalancerManager(self,
resource_class=CloudLoadBalancer,
response_key="loadBalancer", uri_base="loadbalancers") | def _configure_manager(self) | Creates a manager to handle the instances, and another
to handle flavors. | 10.766905 | 8.910991 | 1.208272 |
return self._manager.get_usage(loadbalancer=loadbalancer, start=start,
end=end) | def get_usage(self, loadbalancer=None, start=None, end=None) | Return the load balancer usage records for this account. If 'loadbalancer'
is None, records for all load balancers are returned. You may optionally
include a start datetime or an end datetime, or both, which will limit
the records to those on or after the start time, and those before or on the
end time. These times should be Python datetime.datetime objects, Python
datetime.date objects, or strings in the format: "YYYY-MM-DD HH:MM:SS" or
"YYYY-MM-DD". | 4.001717 | 4.149695 | 0.96434 |
if self._allowed_domains is None:
uri = "/loadbalancers/alloweddomains"
resp, body = self.method_get(uri)
dom_list = body["allowedDomains"]
self._allowed_domains = [itm["allowedDomain"]["name"]
for itm in dom_list]
return self._allowed_domains | def allowed_domains(self) | This property lists the allowed domains for a load balancer.
The allowed domains are restrictions set for the allowed domain names
used for adding load balancer nodes. In order to submit a domain name
as an address for the load balancer node to add, the user must verify
that the domain is valid by using the List Allowed Domains call. Once
verified, simply supply the domain name in place of the node's address
in the add_nodes() call. | 4.534252 | 3.623689 | 1.251281 |
if self._algorithms is None:
uri = "/loadbalancers/algorithms"
resp, body = self.method_get(uri)
self._algorithms = [alg["name"] for alg in body["algorithms"]]
return self._algorithms | def algorithms(self) | Returns a list of available load balancing algorithms. | 4.296834 | 2.954154 | 1.454506 |
if self._protocols is None:
uri = "/loadbalancers/protocols"
resp, body = self.method_get(uri)
self._protocols = [proto["name"] for proto in body["protocols"]]
return self._protocols | def protocols(self) | Returns a list of available load balancing protocols. | 4.183021 | 2.990697 | 1.398678 |
return self._manager.update(loadbalancer, name=name,
algorithm=algorithm, protocol=protocol, halfClosed=halfClosed,
port=port, timeout=timeout, httpsRedirect=httpsRedirect) | def update(self, loadbalancer, name=None, algorithm=None, protocol=None,
halfClosed=None, port=None, timeout=None, httpsRedirect=None) | Provides a way to modify the following attributes of a load balancer:
- name
- algorithm
- protocol
- halfClosed
- port
- timeout
- httpsRedirect | 1.981223 | 2.142385 | 0.924774 |
return loadbalancer.add_connection_throttle(
maxConnectionRate=maxConnectionRate, maxConnections=maxConnections,
minConnections=minConnections, rateInterval=rateInterval) | def add_connection_throttle(self, loadbalancer, maxConnectionRate=None,
maxConnections=None, minConnections=None, rateInterval=None) | Updates the connection throttling information for the load balancer with
the supplied values. At least one of the parameters must be supplied. | 2.025585 | 2.535797 | 0.798796 |
return loadbalancer.update_ssl_termination(securePort=securePort,
enabled=enabled, secureTrafficOnly=secureTrafficOnly) | def update_ssl_termination(self, loadbalancer, securePort=None, enabled=None,
secureTrafficOnly=None) | Updates existing SSL termination information for the load balancer
without affecting the existing certificates/keys. | 2.357601 | 3.646457 | 0.646546 |
return loadbalancer.delete_metadata_for_node(node, keys=keys) | def delete_metadata_for_node(self, loadbalancer, node, keys=None) | Deletes metadata items specified by the 'keys' parameter for
the specified node. If no value for 'keys' is provided, all
metadata is deleted. | 3.652304 | 5.197529 | 0.7027 |
try:
ret = int(val)
except ValueError:
print("Sorry, '%s' is not a valid integer." % val)
return False
if not allow_zero and ret == 0:
print("Please enter a non-zero integer.")
return False
return ret | def safe_int(val, allow_zero=True) | This function converts the six.moves.input values to integers. It handles
invalid entries, and optionally forbids values of zero. | 2.532012 | 2.36792 | 1.069298 |
if isinstance(response, dict):
status = response.get("status_code")
else:
status = response.status_code
cls = _code_map.get(int(status), ClientException)
# import pyrax
# pyrax.utils.trace()
request_id = response.headers.get("x-compute-request-id")
if body:
message = "n/a"
details = "n/a"
if isinstance(body, dict):
message = body.get("message")
details = body.get("details")
if message is details is None:
error = body[next(iter(body))]
if isinstance(error, dict):
message = error.get("message", None)
details = error.get("details", None)
else:
message = error
details = None
else:
message = body
return cls(code=status, message=message, details=details,
request_id=request_id)
else:
return cls(code=status, request_id=request_id) | def from_response(response, body) | Return an instance of a ClientException or subclass
based on an httplib2 response.
Usage::
resp, body = http.request(...)
if resp.status_code != 200:
raise exception_from_response(resp, body) | 2.698689 | 2.73781 | 0.985711 |
@wraps(fnc)
def _wrapped(self, img, *args, **kwargs):
if not isinstance(img, Image):
# Must be the ID
img = self._manager.get(img)
return fnc(self, img, *args, **kwargs)
return _wrapped | def assure_image(fnc) | Converts a image ID passed as the 'image' parameter to a image object. | 2.984936 | 2.803634 | 1.064667 |
uri = "/%s" % self.uri_base
qs = utils.dict_to_qs(dict(limit=limit, marker=marker, name=name,
visibility=visibility, member_status=member_status,
owner=owner, tag=tag, status=status, size_min=size_min,
size_max=size_max, sort_key=sort_key, sort_dir=sort_dir))
if qs:
uri = "%s?%s" % (uri, qs)
return self._list(uri, return_raw=return_raw) | def list(self, limit=None, marker=None, name=None, visibility=None,
member_status=None, owner=None, tag=None, status=None,
size_min=None, size_max=None, sort_key=None, sort_dir=None,
return_raw=False) | Returns a list of resource objects. Pagination is supported through the
optional 'marker' and 'limit' parameters. Filtering the returned value
is possible by specifying values for any of the other parameters. | 1.674316 | 1.753386 | 0.954904 |
def strip_version(uri):
pos = uri.find("/images")
return uri[pos:]
obj_class = self.resource_class
resp, resp_body = self.list(name=name, visibility=visibility,
member_status=member_status, owner=owner, tag=tag,
status=status, size_min=size_min, size_max=size_max,
sort_key=sort_key, sort_dir=sort_dir, return_raw=True)
data = resp_body.get(self.plural_response_key, resp_body)
next_uri = strip_version(resp_body.get("next", ""))
ret = [obj_class(manager=self, info=res) for res in data if res]
while next_uri:
resp, resp_body = self.api.method_get(next_uri)
data = resp_body.get(self.plural_response_key, resp_body)
next_uri = strip_version(resp_body.get("next", ""))
ret.extend([obj_class(manager=self, info=res)
for res in data if res])
return ret | def list_all(self, name=None, visibility=None, member_status=None,
owner=None, tag=None, status=None, size_min=None, size_max=None,
sort_key=None, sort_dir=None) | Returns all of the images in one call, rather than in paginated batches. | 2.259809 | 2.247862 | 1.005314 |
if img_format is None:
img_format = "vhd"
if img_container_format is None:
img_container_format = "bare"
headers = {
"X-Image-Meta-name": name,
"X-Image-Meta-disk_format": img_format,
"X-Image-Meta-container_format": img_container_format,
}
if data:
img_data = data
else:
ident = self.api.identity
region = self.api.region_name
clt = ident.get_client("object_store", region)
if not isinstance(obj, StorageObject):
obj = clt.get_object(container, obj)
img_data = obj.fetch()
uri = "%s/images" % self.uri_base
resp, resp_body = self.api.method_post(uri, headers=headers,
data=img_data) | def create(self, name, img_format=None, img_container_format=None,
data=None, container=None, obj=None, metadata=None) | Creates a new image with the specified name. The image data can either
be supplied directly in the 'data' parameter, or it can be an image
stored in the object storage service. In the case of the latter, you
can either supply the container and object names, or simply a
StorageObject reference.
You may specify the image and image container formats; if unspecified,
the default of "vhd" for image format and "bare" for image container
format will be used.
NOTE: This is blocking, and may take a while to complete. | 2.605696 | 2.427629 | 1.07335 |
img = self.get(img)
uri = "/%s/%s" % (self.uri_base, utils.get_id(img))
body = []
for key, val in value_dict.items():
op = "replace" if key in img.__dict__ else "add"
body.append({"op": op,
"path": "/%s" % key,
"value": val})
headers = {"Content-Type":
"application/openstack-images-v2.1-json-patch"}
resp, resp_body = self.api.method_patch(uri, body=body, headers=headers) | def update(self, img, value_dict) | Accepts an image reference (object or ID) and dictionary of key/value
pairs, where the key is an attribute of the image, and the value is the
desired new value for that image.
NOTE: There is a bug in Glance where the 'add' operation returns a 409
if the property already exists, which conflicts with the spec. So to
get around this a fresh copy of the image must be retrieved, and the
value of 'op' must be determined based on whether this attribute exists
or not. | 3.090411 | 2.532666 | 1.22022 |
if status not in ("pending", "accepted", "rejected"):
raise exc.InvalidImageMemberStatus("The status value must be one "
"of 'accepted', 'rejected', or 'pending'. Received: '%s'" %
status)
api = self.api
project_id = api.identity.tenant_id
uri = "/%s/%s/members/%s" % (self.uri_base, img_id, project_id)
body = {"status": status}
try:
resp, resp_body = self.api.method_put(uri, body=body)
except exc.NotFound as e:
raise exc.InvalidImageMember("The update member request could not "
"be completed. No member request for that image was found.") | def update_image_member(self, img_id, status) | Updates the image whose ID is given with the status specified. This
must be called by the user whose project_id is in the members for the
image. If called by the owner of the image, an InvalidImageMember
exception will be raised.
Valid values for 'status' include:
pending
accepted
rejected
Any other value will result in an InvalidImageMemberStatus exception
being raised. | 3.623242 | 3.357189 | 1.079249 |
try:
return super(ImageMemberManager, self).create(name, *args, **kwargs)
except Exception as e:
if e.http_status == 403:
raise exc.UnsharableImage("You cannot share a public image.")
else:
raise | def create(self, name, *args, **kwargs) | Need to wrap the default call to handle exceptions. | 4.756873 | 4.234066 | 1.123476 |
img = utils.get_id(img)
cont = utils.get_name(cont)
body = {"type": name}
if name == "export":
body["input"] = {
"image_uuid": img,
"receiving_swift_container": cont}
else:
nm = "%s/%s" % (cont, utils.get_name(img))
body["input"] = {
"image_properties": {"name": img_name or img},
"import_from": nm,
"import_from_format": img_format or DEFAULT_FORMAT}
return body | def _create_body(self, name, img=None, cont=None, img_format=None,
img_name=None) | Used to create a new task. Since tasks don't have names, the required
'name' parameter is used for the type of task: 'import' or 'export'. | 4.267035 | 3.879967 | 1.099761 |
cont = kwargs.get("cont")
if cont:
# Verify that it exists. If it doesn't, a NoSuchContainer exception
# will be raised.
api = self.api
rgn = api.region_name
cf = api.identity.object_store[rgn].client
cf.get_container(cont)
return super(ImageTasksManager, self).create(name, *args, **kwargs) | def create(self, name, *args, **kwargs) | Standard task creation, but first check for the existence of the
containers, and raise an exception if they don't exist. | 7.443129 | 6.251877 | 1.190543 |
uri = "/%s/images" % self.uri_base
resp, resp_body = self.api.method_get(uri)
return resp_body | def images(self) | Returns a json-schema document that represents an image members entity,
which is a container of image member entities. | 5.53892 | 4.370836 | 1.267245 |
uri = "/%s/image" % self.uri_base
resp, resp_body = self.api.method_get(uri)
return resp_body | def image(self) | Returns a json-schema document that represents a single image entity. | 6.138416 | 4.719718 | 1.30059 |
uri = "/%s/members" % self.uri_base
resp, resp_body = self.api.method_get(uri)
return resp_body | def image_members(self) | Returns a json-schema document that represents an image members entity
(a container of member entities). | 5.132823 | 4.380055 | 1.171863 |
uri = "/%s/member" % self.uri_base
resp, resp_body = self.api.method_get(uri)
return resp_body | def image_member(self) | Returns a json-schema document that represents an image member entity.
(a container of member entities). | 5.758341 | 4.982526 | 1.155707 |
uri = "/%s/tasks" % self.uri_base
resp, resp_body = self.api.method_get(uri)
return resp_body | def image_tasks(self) | Returns a json-schema document that represents a container of tasks
entities. | 5.606561 | 4.508249 | 1.243623 |
uri = "/%s/task" % self.uri_base
resp, resp_body = self.api.method_get(uri)
return resp_body | def image_task(self) | Returns a json-schema document that represents an task entity. | 6.245492 | 4.605647 | 1.356051 |
self._manager = ImageManager(self, resource_class=Image,
response_key="", plural_response_key="images",
uri_base="images")
self._tasks_manager = ImageTasksManager(self, resource_class=ImageTask,
response_key="", plural_response_key="tasks",
uri_base="tasks")
self._schema_manager = JSONSchemaManager(self, resource_class=None,
response_key="", plural_response_key="", uri_base="schemas") | def _configure_manager(self) | Create the manager to handle queues. | 3.75436 | 3.453489 | 1.087121 |
return self._manager.list(limit=limit, marker=marker, name=name,
visibility=visibility, member_status=member_status,
owner=owner, tag=tag, status=status, size_min=size_min,
size_max=size_max, sort_key=sort_key, sort_dir=sort_dir) | def list(self, limit=None, marker=None, name=None, visibility=None,
member_status=None, owner=None, tag=None, status=None,
size_min=None, size_max=None, sort_key=None, sort_dir=None) | Returns a list of resource objects. Pagination is supported through the
optional 'marker' and 'limit' parameters. Filtering the returned value
is possible by specifying values for any of the other parameters. | 1.521643 | 1.606545 | 0.947152 |
return self._manager.list_all(name=name, visibility=visibility,
member_status=member_status, owner=owner, tag=tag,
status=status, size_min=size_min, size_max=size_max,
sort_key=sort_key, sort_dir=sort_dir) | def list_all(self, name=None, visibility=None, member_status=None,
owner=None, tag=None, status=None, size_min=None, size_max=None,
sort_key=None, sort_dir=None) | Returns all of the images in one call, rather than in paginated batches.
The same filtering options available in list() apply here, with the
obvious exception of limit and marker. | 1.551798 | 1.645468 | 0.943074 |
return self._manager.create(name, img_format, data=data,
container=container, obj=obj) | def create(self, name, img_format=None, data=None, container=None,
obj=None, metadata=None) | Creates a new image with the specified name. The image data can either
be supplied directly in the 'data' parameter, or it can be an image
stored in the object storage service. In the case of the latter, you
can either supply the container and object names, or simply a
StorageObject reference. | 3.846959 | 4.954731 | 0.776421 |
return self._tasks_manager.create("export", img=img, cont=cont) | def export_task(self, img, cont) | Creates a task to export the specified image to the swift container
named in the 'cont' parameter. If the container does not exist, a
NoSuchContainer exception is raised.
The 'img' parameter can be either an Image object or the ID of an
image. If these do not correspond to a valid image, a NotFound
exception is raised. | 10.993677 | 12.230059 | 0.898906 |
return self._tasks_manager.create("import", img=img, cont=cont,
img_format=img_format, img_name=img_name) | def import_task(self, img, cont, img_format=None, img_name=None) | Creates a task to import the specified image from the swift container
named in the 'cont' parameter. The new image will be named the same as
the object in the container unless you specify a value for the
'img_name' parameter.
By default it is assumed that the image is in 'vhd' format; if it is
another format, you must specify that in the 'img_format' parameter. | 3.989216 | 4.307878 | 0.926028 |
return settings.set(key, val, env=env) | def set_setting(key, val, env=None) | Changes the value of the specified key in the current environment, or in
another environment if specified. | 5.773443 | 11.857917 | 0.486885 |
if env:
set_environment(env)
return _create_identity(id_type=id_type, username=username,
password=password, tenant_id=tenant_id, tenant_name=tenant_name,
api_key=api_key, verify_ssl=verify_ssl, return_context=True) | def create_context(id_type=None, env=None, username=None, password=None,
tenant_id=None, tenant_name=None, api_key=None, verify_ssl=None) | Returns an instance of the specified identity class, or if none is
specified, an instance of the current setting for 'identity_class'.
You may optionally set the environment by passing the name of that
environment in the 'env' parameter. | 2.111932 | 2.528539 | 0.835238 |
if id_type:
cls = _import_identity(id_type)
else:
cls = settings.get("identity_class")
if not cls:
raise exc.IdentityClassNotDefined("No identity class has "
"been defined for the current environment.")
if verify_ssl is None:
verify_ssl = get_setting("verify_ssl")
context = cls(username=username, password=password, tenant_id=tenant_id,
tenant_name=tenant_name, api_key=api_key, verify_ssl=verify_ssl)
if return_context:
return context
else:
global identity
identity = context | def _create_identity(id_type=None, username=None, password=None, tenant_id=None,
tenant_name=None, api_key=None, verify_ssl=None,
return_context=False) | Creates an instance of the current identity_class and assigns it to the
module-level name 'identity' by default. If 'return_context' is True, the
module-level 'identity' is untouched, and instead the instance is returned. | 2.561205 | 2.553758 | 1.002916 |
def _wrapped(*args, **kwargs):
if identity is None:
_create_identity()
return fnc(*args, **kwargs)
return _wrapped | def _assure_identity(fnc) | Ensures that the 'identity' attribute is not None. | 3.298902 | 3.189847 | 1.034188 |
@wraps(fnc)
@_assure_identity
def _wrapped(*args, **kwargs):
if not identity.authenticated:
msg = "Authentication required before calling '%s'." % fnc.__name__
raise exc.NotAuthenticated(msg)
return fnc(*args, **kwargs)
return _wrapped | def _require_auth(fnc) | Authentication decorator. | 3.207248 | 3.201491 | 1.001798 |
ret = region or settings.get("region")
context = context or identity
if not ret:
# Nothing specified; get the default from the identity object.
if not context:
_create_identity()
context = identity
ret = context.get_default_region()
if not ret:
# Use the first available region
try:
ret = regions[0]
except IndexError:
ret = ""
return ret | def _safe_region(region=None, context=None) | Value to use when no region is specified. | 5.042909 | 4.761466 | 1.059109 |
global regions, services
identity.auth_with_token(token, tenant_id=tenant_id,
tenant_name=tenant_name)
regions = tuple(identity.regions)
services = tuple(identity.services.keys())
connect_to_services(region=region) | def auth_with_token(token, tenant_id=None, tenant_name=None, region=None) | If you already have a valid token and either a tenant ID or name, you can
call this to configure the identity and available services. | 4.687633 | 4.600502 | 1.018939 |
global regions, services
pw_key = password or api_key
region = _safe_region(region)
tenant_id = tenant_id or settings.get("tenant_id")
identity.set_credentials(username=username, password=pw_key,
tenant_id=tenant_id, region=region, authenticate=authenticate)
regions = tuple(identity.regions)
services = tuple(identity.services.keys())
connect_to_services(region=region) | def set_credentials(username, api_key=None, password=None, region=None,
tenant_id=None, authenticate=True) | Set the credentials directly, and then try to authenticate.
If the region is passed, it will authenticate against the proper endpoint
for that region, and set the default region for connections. | 3.945756 | 4.540427 | 0.869027 |
global regions, services
region = _safe_region(region)
identity.set_credential_file(cred_file, region=region,
authenticate=authenticate)
regions = tuple(identity.regions)
services = tuple(identity.services.keys())
connect_to_services(region=region) | def set_credential_file(cred_file, region=None, authenticate=True) | Read in the credentials from the supplied file path, and then try to
authenticate. The file should be a standard config file in one of the
following formats:
For Keystone authentication:
[keystone]
username = myusername
password = 1234567890abcdef
tenant_id = abcdef1234567890
For Rackspace authentication:
[rackspace_cloud]
username = myusername
api_key = 1234567890abcdef
If the region is passed, it will authenticate against the proper endpoint
for that region, and set the default region for connections. | 5.190866 | 6.073908 | 0.854617 |
if not keyring:
# Module not installed
raise exc.KeyringModuleNotInstalled("The 'keyring' Python module is "
"not installed on this system.")
if username is None:
username = settings.get("keyring_username")
if not username:
raise exc.KeyringUsernameMissing("No username specified for keyring "
"authentication.")
password = keyring.get_password("pyrax", username)
if password is None:
raise exc.KeyringPasswordNotFound("No password was found for the "
"username '%s'." % username)
set_credentials(username, password, region=region,
authenticate=authenticate) | def keyring_auth(username=None, region=None, authenticate=True) | Use the password stored within the keyring to authenticate. If a username
is supplied, that name is used; otherwise, the keyring_username value
from the config file is used.
If there is no username defined, or if the keyring module is not installed,
or there is no password set for the given username, the appropriate errors
will be raised.
If the region is passed, it will authenticate against the proper endpoint
for that region, and set the default region for connections. | 3.055756 | 3.40484 | 0.897474 |
global identity, regions, services, cloudservers, cloudfiles, cloud_cdn
global cloud_loadbalancers, cloud_databases, cloud_blockstorage, cloud_dns
global cloud_networks, cloud_monitoring, autoscale, images, queues
identity = None
regions = tuple()
services = tuple()
cloudservers = None
cloudfiles = None
cloud_cdn = None
cloud_loadbalancers = None
cloud_databases = None
cloud_blockstorage = None
cloud_dns = None
cloud_networks = None
cloud_monitoring = None
autoscale = None
images = None
queues = None | def clear_credentials() | De-authenticate by clearing all the names back to None. | 3.50459 | 3.411665 | 1.027237 |
global cloudservers, cloudfiles, cloud_loadbalancers, cloud_databases
global cloud_blockstorage, cloud_dns, cloud_networks, cloud_monitoring
global autoscale, images, queues, cloud_cdn
cloudservers = connect_to_cloudservers(region=region)
cloudfiles = connect_to_cloudfiles(region=region)
cloud_cdn = connect_to_cloud_cdn(region=region)
cloud_loadbalancers = connect_to_cloud_loadbalancers(region=region)
cloud_databases = connect_to_cloud_databases(region=region)
cloud_blockstorage = connect_to_cloud_blockstorage(region=region)
cloud_dns = connect_to_cloud_dns(region=region)
cloud_networks = connect_to_cloud_networks(region=region)
cloud_monitoring = connect_to_cloud_monitoring(region=region)
autoscale = connect_to_autoscale(region=region)
images = connect_to_images(region=region)
queues = connect_to_queues(region=region) | def connect_to_services(region=None) | Establishes authenticated connections to the various cloud APIs. | 1.798178 | 1.760402 | 1.021459 |
region = _safe_region(region)
# If a specific context is passed, use that. Otherwise, use the global
# identity reference.
context = context or identity
url_type = {True: "public", False: "private"}[public]
svc_obj = context.services.get(svc)
if not svc_obj:
return None
ep = svc_obj.endpoints.get(region, {}).get(url_type)
if not ep:
# Try the "ALL" region, and substitute the actual region
ep = svc_obj.endpoints.get("ALL", {}).get(url_type)
return ep | def _get_service_endpoint(context, svc, region=None, public=True) | Parses the services dict to get the proper endpoint for the given service. | 4.189532 | 4.06068 | 1.031732 |
context = context or identity
_cs_auth_plugin.discover_auth_systems()
id_type = get_setting("identity_type")
if id_type != "keystone":
auth_plugin = _cs_auth_plugin.load_plugin(id_type)
else:
auth_plugin = None
region = _safe_region(region, context=context)
mgt_url = _get_service_endpoint(context, "compute", region)
cloudservers = None
if not mgt_url:
# Service is not available
return
if verify_ssl is None:
insecure = not get_setting("verify_ssl")
else:
insecure = not verify_ssl
try:
extensions = nc.discover_extensions(_cs_max_version)
except AttributeError:
extensions = None
clt_class = _cs_client.get_client_class(_cs_max_version)
cloudservers = clt_class(context.username, context.password,
project_id=context.tenant_id, auth_url=context.auth_endpoint,
auth_system=id_type, region_name=region, service_type="compute",
auth_plugin=auth_plugin, insecure=insecure, extensions=extensions,
http_log_debug=_http_debug, **kwargs)
agt = cloudservers.client.USER_AGENT
cloudservers.client.USER_AGENT = _make_agent_name(agt)
cloudservers.client.management_url = mgt_url
cloudservers.client.auth_token = context.token
cloudservers.exceptions = _cs_exceptions
# Add some convenience methods
cloudservers.list_images = cloudservers.images.list
cloudservers.list_flavors = cloudservers.flavors.list
cloudservers.list = cloudservers.servers.list
def list_base_images():
return [image for image in cloudservers.images.list()
if not hasattr(image, "server")]
def list_snapshots():
return [image for image in cloudservers.images.list()
if hasattr(image, "server")]
def find_images_by_name(expr):
return [image for image in cloudservers.images.list()
if re.search(expr, image.name, re.I)]
cloudservers.list_base_images = list_base_images
cloudservers.list_snapshots = list_snapshots
cloudservers.find_images_by_name = find_images_by_name
cloudservers.identity = identity
return cloudservers | def connect_to_cloudservers(region=None, context=None, verify_ssl=None, **kwargs) | Creates a client for working with cloud servers. | 2.918721 | 2.915369 | 1.00115 |
if public is None:
is_public = not bool(get_setting("use_servicenet"))
else:
is_public = public
ret = _create_client(ep_name="object_store", region=region,
public=is_public)
if ret:
# Add CDN endpoints, if available
region = _safe_region(region)
ret.cdn_management_url = _get_service_endpoint(None, "object_cdn",
region, public=is_public)
return ret | def connect_to_cloudfiles(region=None, public=None) | Creates a client for working with CloudFiles/Swift. | 5.949387 | 5.690546 | 1.045486 |
global default_region
# (nicholaskuechler/keekz) 2017-11-30 - Not a very elegant solution...
# Cloud CDN only exists in 2 regions: DFW and LON
# But this isn't playing nicely with the identity service catalog results.
# US auth based regions (DFW, ORD, IAD, SYD, HKG) need to use CDN in DFW
# UK auth based regions (LON) need to use CDN in LON
if region in ['DFW', 'IAD', 'ORD', 'SYD', 'HKG']:
return _create_client(ep_name="cdn", region="DFW")
elif region in ['LON']:
return _create_client(ep_name="cdn", region="LON")
else:
if default_region in ['DFW', 'IAD', 'ORD', 'SYD', 'HKG']:
return _create_client(ep_name="cdn", region="DFW")
elif default_region in ['LON']:
return _create_client(ep_name="cdn", region="LON")
else:
return _create_client(ep_name="cdn", region=region) | def connect_to_cloud_cdn(region=None) | Creates a client for working with cloud loadbalancers. | 4.560164 | 4.524307 | 1.007925 |
return _create_client(ep_name="image", region=region, public=public) | def connect_to_images(region=None, public=True) | Creates a client for working with Images. | 10.674826 | 7.224159 | 1.477657 |
return _create_client(ep_name="queues", region=region, public=public) | def connect_to_queues(region=None, public=True) | Creates a client for working with Queues. | 12.601128 | 8.612626 | 1.463099 |
if env is None:
env = self.environment
try:
ret = self._settings[env][key]
except KeyError:
ret = None
if ret is None:
# See if it's set in the environment
if key == "identity_class":
# This is defined via the identity_type
env_var = self.env_dct.get("identity_type")
ityp = os.environ.get(env_var)
if ityp:
return _import_identity(ityp)
else:
env_var = self.env_dct.get(key)
if env_var is not None:
ret = os.environ.get(env_var)
return ret | def get(self, key, env=None) | Returns the config setting for the specified environment. If no
environment is specified, the value for the current environment is
returned. If an unknown key or environment is passed, None is returned. | 3.859472 | 3.730796 | 1.03449 |
if env is None:
env = self.environment
else:
if env not in self._settings:
raise exc.EnvironmentNotFound("There is no environment named "
"'%s'." % env)
dct = self._settings[env]
if key not in dct:
raise exc.InvalidSetting("The setting '%s' is not defined." % key)
dct[key] = val
if key == "identity_type":
# If setting the identity_type, also change the identity_class.
dct["identity_class"] = _import_identity(val)
elif key == "region":
if not identity:
return
current = identity.region
if current == val:
return
if "LON" in (current, val):
# This is an outlier, as it has a separate auth
identity.region = val
elif key == "verify_ssl":
if not identity:
return
identity.verify_ssl = val | def set(self, key, val, env=None) | Changes the value for the setting specified by 'key' to the new value.
By default this will change the current environment, but you can change
values in other environments by passing the name of that environment as
the 'env' parameter. | 4.287431 | 4.119456 | 1.040776 |
cfg = ConfigParser.SafeConfigParser()
try:
cfg.read(config_file)
except ConfigParser.MissingSectionHeaderError as e:
# The file exists, but doesn't have the correct format.
raise exc.InvalidConfigurationFile(e)
def safe_get(section, option, default=None):
try:
return cfg.get(section, option)
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
return default
# A common mistake is including credentials in the config file. If any
# values are found, issue a warning so that the developer can correct
# this problem.
creds_found = False
for section in cfg.sections():
if section == "settings":
section_name = "default"
self._default_set = True
else:
section_name = section
# Check for included credentials
for key in ("username", "password", "api_key"):
if creds_found:
break
if safe_get(section, key):
creds_found = True
dct = self._settings[section_name] = {}
dct["region"] = safe_get(section, "region", default_region)
ityp = safe_get(section, "identity_type")
if ityp:
dct["identity_type"] = _id_type(ityp)
dct["identity_class"] = _import_identity(ityp)
# Handle both the old and new names for this setting.
debug = safe_get(section, "debug")
if debug is None:
debug = safe_get(section, "http_debug", "False")
dct["http_debug"] = debug == "True"
verify_ssl = safe_get(section, "verify_ssl", "True")
dct["verify_ssl"] = verify_ssl == "True"
dct["keyring_username"] = safe_get(section, "keyring_username")
dct["encoding"] = safe_get(section, "encoding", default_encoding)
dct["auth_endpoint"] = safe_get(section, "auth_endpoint")
dct["tenant_name"] = safe_get(section, "tenant_name")
dct["tenant_id"] = safe_get(section, "tenant_id")
use_servicenet = safe_get(section, "use_servicenet", "False")
dct["use_servicenet"] = use_servicenet == "True"
app_agent = safe_get(section, "custom_user_agent")
if app_agent:
# Customize the user-agent string with the app name.
dct["user_agent"] = "%s %s" % (app_agent, USER_AGENT)
else:
dct["user_agent"] = USER_AGENT
# If this is the first section, make it the default
if not self._default_set:
self._settings["default"] = self._settings[section]
self._default_set = True
if creds_found:
warnings.warn("Login credentials were detected in your .pyrax.cfg "
"file. These have been ignored, but you should remove "
"them and either place them in a credential file, or "
"consider using another means of authentication. More "
"information on the use of credential files can be found "
"in the 'docs/getting_started.md' document.") | def read_config(self, config_file) | Parses the specified configuration file and stores the values. Raises
an InvalidConfigurationFile exception if the file is not well-formed. | 2.880087 | 2.852071 | 1.009823 |
return self.manager.update_record(self.domain_id, self, data=data,
priority=priority, ttl=ttl, comment=comment) | def update(self, data=None, priority=None, ttl=None, comment=None) | Modifies this record. | 3.446011 | 3.283261 | 1.049569 |
self.manager.delete(self, delete_subdomains=delete_subdomains) | def delete(self, delete_subdomains=False) | Deletes this domain and all of its resource records. If this domain has
subdomains, each subdomain will now become a root domain. If you wish to
also delete any subdomains, pass True to 'delete_subdomains'. | 4.812733 | 4.173336 | 1.15321 |
return self.manager.update_domain(self, emailAddress=emailAddress,
ttl=ttl, comment=comment) | def update(self, emailAddress=None, ttl=None, comment=None) | Provides a way to modify the following attributes of a domain
entry:
- email address
- ttl setting
- comment | 4.115993 | 4.68876 | 0.877843 |
return self.manager.list_subdomains(self, limit=limit, offset=offset) | def list_subdomains(self, limit=None, offset=None) | Returns a list of all subdomains for this domain. | 4.30638 | 3.560357 | 1.209536 |
return self.manager.list_records(self, limit=limit, offset=offset) | def list_records(self, limit=None, offset=None) | Returns a list of all records configured for this domain. | 4.575129 | 3.776246 | 1.211555 |
return self.manager.search_records(self, record_type=record_type,
name=name, data=data) | def search_records(self, record_type, name=None, data=None) | Returns a list of all records configured for this domain that match
the supplied search criteria. | 3.644147 | 4.186479 | 0.870456 |
matches = self.manager.search_records(self, record_type=record_type,
name=name, data=data)
if not matches:
raise exc.DomainRecordNotFound
elif len(matches) > 1:
raise exc.DomainRecordNotUnique
return matches[0] | def find_record(self, record_type, name=None, data=None) | Returns a single record for this domain that matches the supplied
search criteria.
If no record matches, a DomainRecordNotFound exception will be raised.
If more than one matches, a DomainRecordNotUnique exception will
be raised. | 3.625388 | 2.566753 | 1.412441 |
return self.manager.update_record(self, record, data=data,
priority=priority, ttl=ttl, comment=comment) | def update_record(self, record, data=None, priority=None,
ttl=None, comment=None) | Modifies an existing record for this domain. | 2.504647 | 2.726435 | 0.918653 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.