code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
# Build the JSON payload for a domain-creation request. None is normalized
# to [] here so the signature avoids mutable default arguments.
if subdomains is None:
subdomains = []
if records is None:
records = []
# The API expects a list under "domains" even for a single domain.
body = {"domains": [{
"name": name,
"emailAddress": emailAddress,
"ttl": ttl,
"comment": comment,
"subdomains": {
"domains": subdomains
},
"recordsList": {
"records": records
},
}]}
return body | def _create_body(self, name, emailAddress, ttl=3600, comment=None,
subdomains=None, records=None) | Creates the appropriate dict for creating a new domain. | 2.237204 | 2.136487 | 1.047141 |
# Clear the cached paging state for one service (or all of them), then,
# if a response body was given, repopulate it from the body's pagination links.
if service == "all":
for svc in self._paging.keys():
svc_dct = self._paging[svc]
svc_dct["next_uri"] = svc_dct["prev_uri"] = None
svc_dct["total_entries"] = None
return
svc_dct = self._paging[service]
svc_dct["next_uri"] = svc_dct["prev_uri"] = None
svc_dct["total_entries"] = None
if not body:
return
svc_dct["total_entries"] = body.get("totalEntries")
links = body.get("links")
uri_base = self.uri_base
if links:
for link in links:
href = link["href"]
# Strip the absolute endpoint prefix from the href, keeping the
# path relative to the service root. pos - 1 retains the leading
# "/" that precedes uri_base — presumably intentional; verify
# against the endpoint URL shape.
pos = href.index(uri_base)
page_uri = href[pos - 1:]
if link["rel"] == "next":
svc_dct["next_uri"] = page_uri
elif link["rel"] == "previous":
svc_dct["prev_uri"] = page_uri | def _reset_paging(self, service, body=None) | Resets the internal attributes when there is no current paging request. | 2.225418 | 2.17842 | 1.021574 |
# Compose the listing URI with optional limit/offset query string and delegate.
uri = "/%s%s" % (self.uri_base, self._get_pagination_qs(limit, offset))
return self._list(uri) | def list(self, limit=None, offset=None) | Gets a list of all domains, or optionally a page of domains. | 5.92226 | 5.933456 | 0.998113 |
# Fetch one page of resources and wrap each entry in resource_class.
# When list_all is True, keep following "next" links until exhausted.
resp, resp_body = self._retry_get(uri)
if obj_class is None:
obj_class = self.resource_class
data = resp_body[self.plural_response_key]
ret = [obj_class(self, res, loaded=False)
for res in data if res]
# Record pagination state from this response before possibly recursing.
self._reset_paging("domain", resp_body)
if list_all:
dom_paging = self._paging.get("domain", {})
while dom_paging.get("next_uri"):
next_uri = dom_paging.get("next_uri")
# list_all=False here: each recursive call advances the paging
# state, and this loop re-checks next_uri after every page.
ret.extend(self._list(uri=next_uri, obj_class=obj_class,
list_all=False))
return ret | def _list(self, uri, obj_class=None, list_all=False) | Handles the communication with the API when getting
a full listing of the resources managed by this class. | 3.440244 | 3.488049 | 0.986295 |
# Follow the cached "previous" link for domains, or fail loudly if none.
uri = self._paging.get("domain", {}).get("prev_uri")
if uri is None:
raise exc.NoMoreResults("There are no previous pages of domains "
"to list.")
return self._list(uri) | def list_previous_page(self) | When paging through results, this will return the previous page, using
the same limit. If there are no more results, a NoMoreResults exception
will be raised. | 10.38129 | 8.082603 | 1.284399 |
# Follow the cached "next" link for domains, or fail loudly if none.
uri = self._paging.get("domain", {}).get("next_uri")
if uri is None:
raise exc.NoMoreResults("There are no more pages of domains to "
"list.")
return self._list(uri) | def list_next_page(self) | When paging through results, this will return the next page, using the
same limit. If there are no more results, a NoMoreResults exception
will be raised. | 8.788561 | 7.447497 | 1.180069 |
# Fetch a single domain without its records/subdomains (faster); callers
# load records lazily, so start with an empty list.
uri = "%s?showRecords=false&showSubdomains=false" % uri
resp, body = self._retry_get(uri)
body["records"] = []
return self.resource_class(self, body, loaded=True) | def _get(self, uri) | Handles the communication with the API when getting
a specific resource managed by this class.
Because DNS returns a different format for the body,
the BaseManager method must be overridden here. | 7.907274 | 5.533327 | 1.429027 |
# The Cloud DNS API occasionally returns empty bodies; retry up to
# DEFAULT_RETRY times before declaring the service unresponsive.
for i in six.moves.range(DEFAULT_RETRY):
resp, body = self.api.method_get(uri)
if body:
return resp, body
# Tried too many times
raise exc.ServiceResponseFailure("The Cloud DNS service failed to "
"respond to the request.") | def _retry_get(self, uri) | Handles GET calls to the Cloud DNS API in order to retry on empty
body responses. | 7.069594 | 6.425654 | 1.100214 |
# Dispatch table: HTTP verb -> transport method. GET goes through the
# empty-body-retrying wrapper.
api_methods = {
"GET": self._retry_get,
"POST": self.api.method_post,
"PUT": self.api.method_put,
"DELETE": self.api.method_delete,
}
api_method = api_methods[method]
try:
if body is None:
resp, resp_body = api_method(uri, *args, **kwargs)
else:
resp, resp_body = api_method(uri, body=body, *args, **kwargs)
except Exception as e:
# Wrap transport errors in the caller-specified exception class, if any.
if error_class:
raise error_class(e)
else:
raise
# Poll the async job's status URL until it leaves RUNNING or we time out.
callbackURL = resp_body["callbackUrl"].split("/status/")[-1]
massagedURL = "/status/%s?showDetails=true" % callbackURL
start = time.time()
timed_out = False
while (resp_body["status"] == "RUNNING") and not timed_out:
resp_body = None
while resp_body is None and not timed_out:
resp, resp_body = self._retry_get(massagedURL)
if self._timeout:
timed_out = ((time.time() - start) > self._timeout)
time.sleep(self._delay)
if timed_out:
raise exc.DNSCallTimedOut("The API call to '%s' did not complete "
"after %s seconds." % (uri, self._timeout))
if error_class and (resp_body["status"] == "ERROR"):
# This call will handle raising the error.
self._process_async_error(resp_body, error_class)
if has_response:
ret = resp, resp_body["response"]
else:
ret = resp, resp_body
# NOTE(review): 'ret' is already built above, so this re-parse of
# resp_body is dead code — json.loads on a dict raises TypeError,
# which is swallowed. Looks safe to delete; confirm before removing.
try:
resp_body = json.loads(resp_body)
except Exception:
pass
return ret | def _async_call(self, uri, body=None, method="GET", error_class=None,
has_response=True, *args, **kwargs) | Handles asynchronous call/responses for the DNS API.
Returns the response headers and body if the call was successful.
If an error status is returned, and the 'error_class' parameter is
specified, that class of error will be raised with the details from
the response. If no error class is specified, the response headers
and body will be returned to the calling method, which will have
to handle the result. | 2.940712 | 2.900993 | 1.013692 |
# Normalize the two error-body shapes the DNS API returns (single fault
# vs. a "failedItems" multi-fault list) into one message, then raise.
def _fmt_error(err):
# Remove the cumbersome Java-esque message
details = err.get("details", "").replace("\n", " ")
if not details:
details = err.get("message", "")
return "%s (%s)" % (details, err.get("code", ""))
error = resp_body.get("error", "")
if "failedItems" in error:
# Multi-error response
faults = error.get("failedItems", {}).get("faults", [])
msgs = [_fmt_error(fault) for fault in faults]
msg = "\n".join(msgs)
else:
msg = _fmt_error(error)
raise error_class(msg) | def _process_async_error(self, resp_body, error_class) | The DNS API does not return a consistent format for their error
messages. This abstracts out the differences in order to present
a single unified message in the exception to be raised. | 3.642786 | 3.342532 | 1.089828 |
# Run creation hooks, POST asynchronously, then wrap the first returned
# domain. NOTE(review): the 'records', 'subdomains', 'return_none' and
# 'return_raw' parameters are not referenced in this body — presumably
# consumed by the hooks or kept for interface compatibility; verify.
self.run_hooks("modify_body_for_create", body, **kwargs)
resp, resp_body = self._async_call(uri, body=body, method="POST",
error_class=exc.DomainCreationFailed)
response_body = resp_body[self.response_key][0]
return self.resource_class(self, response_body) | def _create(self, uri, body, records=None, subdomains=None,
return_none=False, return_raw=False, **kwargs) | Handles the communication with the API when creating a new
resource managed by this class.
Since DNS works completely differently for create() than the other
APIs, this method overrides the default BaseManager behavior.
If 'records' are supplied, they should be a list of dicts. Each
record dict should have the following format:
{"name": "example.com",
"type": "A",
"data": "192.0.2.17",
"ttl": 86400}
If 'subdomains' are supplied, they should be a list of dicts. Each
subdomain dict should have the following format:
{"name": "sub1.example.com",
"comment": "1st sample subdomain",
"emailAddress": "sample@rackspace.com"} | 5.408093 | 5.590769 | 0.967325 |
# Asynchronous DELETE of a domain; optionally cascade to subdomains via
# the deleteSubdomains query flag. No response body is expected.
uri = "/%s/%s" % (self.uri_base, utils.get_id(domain))
if delete_subdomains:
uri = "%s?deleteSubdomains=true" % uri
resp, resp_body = self._async_call(uri, method="DELETE",
error_class=exc.DomainDeletionFailed, has_response=False) | def delete(self, domain, delete_subdomains=False) | Deletes the specified domain and all of its resource records. If the
domain has subdomains, each subdomain will now become a root domain. If
you wish to also delete any subdomains, pass True to 'delete_subdomains'. | 4.770695 | 4.903196 | 0.972977 |
# Fast path: a name-only filter can use the API's server-side search.
# The API match is case-insensitive/prefix-y, so re-filter for exact
# (case-folded) equality client-side.
if (len(kwargs) == 1) and ("name" in kwargs):
# Filtering on name; use the more efficient method.
nm = kwargs["name"].lower()
uri = "/%s?name=%s" % (self.uri_base, nm)
matches = self._list(uri, list_all=True)
return [match for match in matches
if match.name.lower() == nm]
else:
return super(CloudDNSManager, self).findall(**kwargs) | def findall(self, **kwargs) | Finds all items with attributes matching ``**kwargs``.
Normally this isn't very efficient, since the default action is to
load the entire list and then filter on the Python side, but the DNS
API provides a more efficient search option when filtering on name.
So if the filter is on name, use that; otherwise, use the default. | 4.593062 | 3.907812 | 1.175354 |
# Query the change feed for a domain since the given date/datetime
# (serialized to ISO-8601 with timezone info).
domain_id = utils.get_id(domain)
dt = utils.iso_time_string(date_or_datetime, show_tzinfo=True)
uri = "/domains/%s/changes?since=%s" % (domain_id, dt)
resp, body = self._retry_get(uri)
return body.get("changes", []) | def changes_since(self, domain, date_or_datetime) | Gets the changes for a domain since the specified date/datetime.
The date can be one of:
- a Python datetime object
- a Python date object
- a string in the format 'YYYY-MM-YY HH:MM:SS'
- a string in the format 'YYYY-MM-YY'
It returns a list of dicts, whose keys depend on the specific change
that was made. A simple example of such a change dict:
{u'accountId': 000000,
u'action': u'update',
u'changeDetails': [{u'field': u'serial_number',
u'newValue': u'1354038941',
u'originalValue': u'1354038940'},
{u'field': u'updated_at',
u'newValue': u'Tue Nov 27 17:55:41 UTC 2012',
u'originalValue': u'Tue Nov 27 17:55:40 UTC 2012'}],
u'domain': u'example.com',
u'targetId': 00000000,
u'targetType': u'Domain'} | 4.111469 | 4.025194 | 1.021434 |
# Export is asynchronous even though it is a GET; return only the BIND 9
# "contents" field of the completed job.
uri = "/domains/%s/export" % utils.get_id(domain)
resp, resp_body = self._async_call(uri, method="GET",
error_class=exc.NotFound)
return resp_body.get("contents", "") | def export_domain(self, domain) | Provides the BIND (Berkeley Internet Name Domain) 9 formatted contents
of the requested domain. This call is for a single domain only, and as
such, does not provide subdomain information.
Sample export:
{u'accountId': 000000,
u'contentType': u'BIND_9',
u'contents': u'example.com.\t3600\tIN\tSOA\tns.rackspace.com. '
'foo@example.com. 1354202974 21600 3600 1814400 500'
'example.com.\t3600\tIN\tNS\tdns1.stabletransit.com.'
'example.com.\t3600\tIN\tNS\tdns2.stabletransit.com.',
u'id': 1111111} | 6.871746 | 5.382683 | 1.27664 |
# Create a domain from a BIND 9 zone-file string via the async import
# endpoint; returns the raw response body from the completed job.
uri = "/domains/import"
body = {"domains": [{
"contentType": "BIND_9",
"contents": domain_data,
}]}
resp, resp_body = self._async_call(uri, method="POST", body=body,
error_class=exc.DomainCreationFailed)
return resp_body | def import_domain(self, domain_data) | Takes a string in the BIND 9 format and creates a new domain. See the
'export_domain()' method for a description of the format. | 7.567207 | 5.914792 | 1.27937 |
# Require at least one mutable attribute, then PUT only the keys the
# caller actually supplied (None values are stripped from the body).
if not any((emailAddress, ttl, comment)):
raise exc.MissingDNSSettings(
"No settings provided to update_domain().")
uri = "/domains/%s" % utils.get_id(domain)
body = {"comment": comment,
"ttl": ttl,
"emailAddress": emailAddress,
}
none_keys = [key for key, val in body.items()
if val is None]
for none_key in none_keys:
body.pop(none_key)
resp, resp_body = self._async_call(uri, method="PUT", body=body,
error_class=exc.DomainUpdateFailed, has_response=False)
return resp_body | def update_domain(self, domain, emailAddress=None, ttl=None, comment=None) | Provides a way to modify the following attributes of a domain
record:
- email address
- ttl setting
- comment | 3.728845 | 3.894907 | 0.957364 |
# The commented-out uri is the official API, but it is
# horribly slow.
# uri = "/domains/%s/subdomains" % utils.get_id(domain)
# Workaround: a name search returns the domain plus its subdomains.
uri = "/domains?name=%s" % domain.name
page_qs = self._get_pagination_qs(limit, offset)
if page_qs:
# page_qs starts with "?"; strip it since uri already has one.
uri = "%s&%s" % (uri, page_qs[1:])
return self._list_subdomains(uri, domain.id) | def list_subdomains(self, domain, limit=None, offset=None) | Returns a list of all subdomains of the specified domain. | 4.766439 | 4.806659 | 0.991632 |
# Follow the cached "previous" link for subdomains, or fail loudly.
uri = self._paging.get("subdomain", {}).get("prev_uri")
if uri is None:
raise exc.NoMoreResults("There are no previous pages of subdomains "
"to list.")
return self._list_subdomains(uri) | def list_subdomains_previous_page(self) | When paging through subdomain results, this will return the previous
page, using the same limit. If there are no more results, a
NoMoreResults exception will be raised. | 7.881765 | 6.053542 | 1.302009 |
# Follow the cached "next" link for subdomains, or fail loudly.
uri = self._paging.get("subdomain", {}).get("next_uri")
if uri is None:
raise exc.NoMoreResults("There are no more pages of subdomains "
"to list.")
return self._list_subdomains(uri) | def list_subdomains_next_page(self) | When paging through subdomain results, this will return the next page,
using the same limit. If there are no more results, a NoMoreResults
exception will be raised. | 7.13267 | 5.810346 | 1.227581 |
# Compose the record-listing URI with optional pagination and delegate.
uri = "/domains/%s/records%s" % (utils.get_id(domain),
self._get_pagination_qs(limit, offset))
return self._list_records(uri) | def list_records(self, domain, limit=None, offset=None) | Returns a list of all records configured for the specified domain. | 5.31268 | 5.021717 | 1.057941 |
# Follow the cached "previous" link for records, or fail loudly.
uri = self._paging.get("record", {}).get("prev_uri")
if uri is None:
raise exc.NoMoreResults("There are no previous pages of records "
"to list.")
return self._list_records(uri) | def list_records_previous_page(self) | When paging through record results, this will return the previous page,
using the same limit. If there are no more results, a NoMoreResults
exception will be raised. | 7.886566 | 6.0023 | 1.313924 |
# Follow the cached "next" link for records, or fail loudly.
uri = self._paging.get("record", {}).get("next_uri")
if uri is None:
raise exc.NoMoreResults("There are no more pages of records to list.")
return self._list_records(uri) | def list_records_next_page(self) | When paging through record results, this will return the next page,
using the same limit. If there are no more results, a NoMoreResults
exception will be raised. | 6.829363 | 5.216067 | 1.309294 |
# Build the type/name/data query string, fetch the first page, then walk
# every "next" link so the full result set is returned. Each record is
# tagged with its domain_id for later per-record operations.
search_params = []
if name:
search_params.append("name=%s" % name)
if data:
search_params.append("data=%s" % data)
query_string = "&".join(search_params)
dom_id = utils.get_id(domain)
uri = "/domains/%s/records?type=%s" % (dom_id, record_type)
if query_string:
uri = "%s&%s" % (uri, query_string)
resp, body = self._retry_get(uri)
records = body.get("records", [])
self._reset_paging("record", body)
rec_paging = self._paging.get("record", {})
while rec_paging.get("next_uri"):
resp, body = self._retry_get(rec_paging.get("next_uri"))
self._reset_paging("record", body)
records.extend(body.get("records", []))
for record in records:
record["domain_id"] = dom_id
return [CloudDNSRecord(self, record, loaded=False)
for record in records if record] | def search_records(self, domain, record_type, name=None, data=None) | Returns a list of all records configured for the specified domain that
match the supplied search criteria. | 2.468696 | 2.44252 | 1.010716 |
# Accept either a single record dict or a list of them; POST them all in
# one async call, then wrap the created records (tagged with domain_id).
if isinstance(records, dict):
# Single record passed
records = [records]
dom_id = utils.get_id(domain)
uri = "/domains/%s/records" % dom_id
body = {"records": records}
resp, resp_body = self._async_call(uri, method="POST", body=body,
error_class=exc.DomainRecordAdditionFailed, has_response=False)
records = resp_body.get("response", {}).get("records", [])
for record in records:
record["domain_id"] = dom_id
return [CloudDNSRecord(self, record, loaded=False)
for record in records if record] | def add_records(self, domain, records) | Adds the records to this domain. Each record should be a dict with the
following keys:
- type (required)
- name (required)
- data (required)
- ttl (optional)
- comment (optional)
- priority (required for MX and SRV records; forbidden otherwise) | 4.144813 | 4.265825 | 0.971632 |
# Fetch one record by ID and tag it with its domain_id so record-level
# update/delete calls know which domain it belongs to.
rec_id = utils.get_id(record)
domain_id = utils.get_id(domain)
uri = "/domains/%s/records/%s" % (domain_id, rec_id)
resp, resp_body = self._retry_get(uri)
resp_body["domain_id"] = domain_id
return CloudDNSRecord(self, resp_body, loaded=False) | def get_record(self, domain, record) | Gets the full information for an existing record for this domain. | 3.163123 | 3.070787 | 1.030069 |
# Build the record-update dict (id/name are required; other fields only
# when supplied — utils.params_to_dict merges the non-None params) and
# delegate to the batch update_records() call.
rdict = {"id": record.id,
"name": record.name,
}
pdict = {"data": data,
"priority": priority,
"ttl": ttl,
"comment": comment,
}
utils.params_to_dict(pdict, rdict)
return self.update_records(domain, [rdict]) | def update_record(self, domain, record, data=None, priority=None,
ttl=None, comment=None) | Modifies an existing record for a domain. | 3.310323 | 3.407161 | 0.971578 |
# Batch PUT of record modifications; the API expects a list, so reject
# anything else explicitly rather than letting the request fail remotely.
if not isinstance(records, list):
raise TypeError("Expected records of type list")
uri = "/domains/%s/records" % utils.get_id(domain)
resp, resp_body = self._async_call(uri, method="PUT",
body={"records": records},
error_class=exc.DomainRecordUpdateFailed, has_response=False)
return resp_body | def update_records(self, domain, records) | Modifies an existing records for a domain. | 5.31697 | 5.13696 | 1.035042 |
# Asynchronous DELETE of a single record; no response body expected.
uri = "/domains/%s/records/%s" % (utils.get_id(domain),
utils.get_id(record))
resp, resp_body = self._async_call(uri, method="DELETE",
error_class=exc.DomainRecordDeletionFailed, has_response=False)
return resp_body | def delete_record(self, domain, record) | Deletes an existing record for a domain. | 5.502854 | 5.120878 | 1.074592 |
# Resolve the device's service endpoint and build the HREF/service-name
# pair that PTR (reverse DNS) calls must embed in their "link" element.
context = self.api.identity
region = self.api.region_name
if device_type.lower().startswith("load"):
ep = pyrax._get_service_endpoint(context, "load_balancer", region)
svc = "loadbalancers"
svc_name = "cloudLoadBalancers"
else:
ep = pyrax._get_service_endpoint(context, "compute", region)
svc = "servers"
svc_name = "cloudServersOpenStack"
href = "%s/%s/%s" % (ep, svc, utils.get_id(device))
return (href, svc_name) | def _get_ptr_details(self, device, device_type) | Takes a device and device type and returns the corresponding HREF link
and service name for use with PTR record management. | 3.857672 | 3.529458 | 1.092993 |
# Classify a device as "server" or "loadbalancer". The test-fake classes
# are included only when the unit-test package is importable.
try:
from tests.unit import fakes
server_types = (pyrax.CloudServer, fakes.FakeServer)
lb_types = (CloudLoadBalancer, fakes.FakeLoadBalancer,
fakes.FakeDNSDevice)
except ImportError:
# Not running with tests
server_types = (pyrax.CloudServer, )
lb_types = (CloudLoadBalancer, )
if isinstance(device, server_types):
device_type = "server"
elif isinstance(device, lb_types):
device_type = "loadbalancer"
else:
raise exc.InvalidDeviceType("The device '%s' must be a CloudServer "
"or a CloudLoadBalancer." % device)
return device_type | def _resolve_device_type(self, device) | Given a device, determines if it is a CloudServer, a CloudLoadBalancer,
or an invalid device. | 3.53828 | 3.044657 | 1.162128 |
# List PTR records for a device; a 404 simply means the device has no
# reverse DNS configured, so return an empty list rather than raising.
device_type = self._resolve_device_type(device)
href, svc_name = self._get_ptr_details(device, device_type)
uri = "/rdns/%s?href=%s" % (svc_name, href)
try:
resp, resp_body = self._retry_get(uri)
except exc.NotFound:
return []
records = [CloudDNSPTRRecord(rec, device)
for rec in resp_body.get("records", [])]
return records | def list_ptr_records(self, device) | Returns a list of all PTR records configured for this device. | 5.251343 | 5.125558 | 1.024541 |
device_type = self._resolve_device_type(device)
href, svc_name = self._get_ptr_details(device, device_type)
if not isinstance(records, (list, tuple)):
records = [records]
body = {"recordsList": {
"records": records},
"link": {
"content": "",
"href": href,
"rel": svc_name,
}}
uri = "/rdns"
# This is a necessary hack, so here's why: if you attempt to add
# PTR records to device, and you don't have rights to either the device
# or the IP address, the DNS API will return a 401 - Unauthorized.
# Unfortunately, the pyrax client interprets this as a bad auth token,
# and there is no way to distinguish this from an actual authentication
# failure. The client will attempt to re-authenticate as a result, and
# will fail, due to the DNS API not having regional endpoints. The net
# result is that an EndpointNotFound exception will be raised, which
# we catch here and then raise a more meaningful exception.
# The Rackspace DNS team is working on changing this to return a 403
# instead; when that happens this kludge can go away.
try:
resp, resp_body = self._async_call(uri, body=body, method="POST",
error_class=exc.PTRRecordCreationFailed)
except exc.EndpointNotFound:
raise exc.InvalidPTRRecord("The domain/IP address information is not "
"valid for this device.")
return resp_body.get("records")
records = [CloudDNSPTRRecord(rec, device)
for rec in resp_body.get("records", [])]
return records | def add_ptr_records(self, device, records) | Adds one or more PTR records to the specified device. | 6.868372 | 6.691686 | 1.026404 |
device_type = self._resolve_device_type(device)
href, svc_name = self._get_ptr_details(device, device_type)
try:
rec_id = record.id
except AttributeError:
rec_id = record
rec = {"name": domain_name,
"id": rec_id,
"type": "PTR",
"data": data,
}
if ttl is not None:
# Minimum TTL is 300 seconds
rec["ttl"] = max(300, ttl)
if comment is not None:
# Maximum comment length is 160 chars
rec["comment"] = comment[:160]
body = {"recordsList": {
"records": [rec]},
"link": {
"content": "",
"href": href,
"rel": svc_name,
}}
uri = "/rdns"
try:
resp, resp_body = self._async_call(uri, body=body, method="PUT",
has_response=False, error_class=exc.PTRRecordUpdateFailed)
except exc.EndpointNotFound as e:
raise exc.InvalidPTRRecord("The record domain/IP address "
"information is not valid for this device.")
return resp_body.get("status") == "COMPLETED" | def update_ptr_record(self, device, record, domain_name, data=None,
ttl=None, comment=None) | Updates a PTR record with the supplied values. | 4.269411 | 4.278834 | 0.997798 |
# Delete the device's PTR records (optionally only those matching one IP)
# and report success as a boolean ("COMPLETED" job status).
device_type = self._resolve_device_type(device)
href, svc_name = self._get_ptr_details(device, device_type)
uri = "/rdns/%s?href=%s" % (svc_name, href)
if ip_address:
uri = "%s&ip=%s" % (uri, ip_address)
resp, resp_body = self._async_call(uri, method="DELETE",
has_response=False,
error_class=exc.PTRRecordDeletionFailed)
return resp_body.get("status") == "COMPLETED" | def delete_ptr_records(self, device, ip_address=None) | Deletes the PTR records for the specified device. If 'ip_address' is
supplied, only the PTR records with that IP address will be deleted. | 4.620448 | 4.8106 | 0.960472 |
# Single manager handles all domain operations for this client.
self._manager = CloudDNSManager(self, resource_class=CloudDNSDomain,
response_key="domains", plural_response_key="domains",
uri_base="domains") | def _configure_manager(self) | Creates a manager to handle the instances, and another
to handle flavors. | 11.014369 | 8.893106 | 1.238529 |
# Retry GETs that come back with an empty body.
# NOTE(review): retry count is hard-coded to 3 here while the manager's
# _retry_get uses DEFAULT_RETRY — consider unifying.
for i in six.moves.range(3):
resp, body = super(CloudDNSClient, self).method_get(uri, **kwargs)
if body:
return resp, body
raise exc.ServiceResponseFailure("The Cloud DNS service failed to "
"respond to the request.") | def method_get(self, uri, **kwargs) | Overload the method_get function in order to retry on empty body
responses from the Cloud DNS API | 5.619972 | 4.26757 | 1.316902 |
# Thin delegation to the manager's paged domain listing.
return self._manager.list(limit=limit, offset=offset) | def list(self, limit=None, offset=None) | Returns a list of all resources. | 6.103431 | 5.025395 | 1.214518 |
# Thin delegation to the domain object's own update().
return domain.update(emailAddress=emailAddress,
ttl=ttl, comment=comment) | def update_domain(self, domain, emailAddress=None, ttl=None, comment=None) | Provides a way to modify the following attributes of a domain
record:
- email address
- ttl setting
- comment | 4.15528 | 5.383335 | 0.771878 |
# Thin delegation to the domain object's subdomain listing.
return domain.list_subdomains(limit=limit, offset=offset) | def list_subdomains(self, domain, limit=None, offset=None) | Returns a list of all subdomains for the specified domain. | 4.896149 | 4.305588 | 1.137162 |
# NOTE(review): the 'limit' and 'offset' parameters are accepted but not
# passed to the iterator — presumably kept for interface symmetry; verify.
return SubdomainResultsIterator(self._manager, domain=domain) | def get_subdomain_iterator(self, domain, limit=None, offset=None) | Returns an iterator that will return each available subdomain for the
specified domain. If there are more than the limit of 100 subdomains,
the iterator will continue to fetch subdomains from the API until all
subdomains have been returned. | 21.007044 | 20.491785 | 1.025145 |
# Thin delegation to the domain object's record listing.
return domain.list_records(limit=limit, offset=offset) | def list_records(self, domain, limit=None, offset=None) | Returns a list of all records configured for the specified domain. | 5.59339 | 5.068737 | 1.103508 |
# Thin delegation to the domain object's record search.
return domain.search_records(record_type=record_type,
name=name, data=data) | def search_records(self, domain, record_type, name=None, data=None) | Returns a list of all records configured for the specified domain
that match the supplied search criteria. | 4.235752 | 4.91211 | 0.862308 |
# Thin delegation to the domain object's single-record lookup.
return domain.find_record(record_type=record_type,
name=name, data=data) | def find_record(self, domain, record_type, name=None, data=None) | Returns a single record for this domain that matches the supplied
search criteria.
If no record matches, a DomainRecordNotFound exception will be raised.
If more than one matches, a DomainRecordNotUnique exception will
be raised. | 4.128239 | 4.249824 | 0.97139 |
# Thin delegation to the domain object's record update.
return domain.update_record(record, data=data, priority=priority,
ttl=ttl, comment=comment) | def update_record(self, domain, record, data=None, priority=None, ttl=None,
comment=None) | Modifies an existing record for a domain. | 2.654579 | 3.245654 | 0.817887 |
# Thin delegation to the manager's PTR record update.
return self._manager.update_ptr_record(device, record, domain_name,
data=data, ttl=ttl, comment=comment) | def update_ptr_record(self, device, record, domain_name, data=None,
ttl=None, comment=None) | Updates a PTR record with the supplied values. | 2.473288 | 2.937369 | 0.842008 |
# Thin delegation to the manager's PTR record deletion.
return self._manager.delete_ptr_records(device, ip_address=ip_address) | def delete_ptr_records(self, device, ip_address=None) | Deletes the PTR records for the specified device. If 'ip_address'
is supplied, only the PTR records with that IP address will be deleted. | 3.97062 | 4.150051 | 0.956764 |
# Return only the "absolute" section of the account's /limits response.
resp, body = self.method_get("/limits")
absolute_limits = body.get("limits", {}).get("absolute")
return absolute_limits | def get_absolute_limits(self) | Returns a dict with the absolute limits for the current account. | 7.506979 | 5.484077 | 1.368868 |
# Flatten the "rate" section of /limits into a list of
# {"uri": ..., "limits": ...} dicts, one per rate-limited URI.
resp, body = self.method_get("/limits")
rate_limits = body.get("limits", {}).get("rate")
ret = []
for rate_limit in rate_limits:
limits = rate_limit["limit"]
uri_limits = {"uri": rate_limit["uri"],
"limits": limits}
ret.append(uri_limits)
return ret | def get_rate_limits(self) | Returns a dict with the current rate limit information for domain
and status requests. | 3.821431 | 3.809766 | 1.003062 |
# Pop from the locally-cached results; when exhausted, fetch the next
# page via the paging state and try once more before stopping iteration.
try:
return self.results.pop(0)
except IndexError:
# next_uri is None => paging is finished; "" (falsy, non-None) =>
# the initial fetch has not happened yet.
if self.next_uri is None:
raise StopIteration()
else:
if not self.next_uri:
# First fetch: use the public list method (scoped to the
# domain when one was supplied).
if self.domain:
self.results = self.list_method(self.domain)
else:
self.results = self.list_method()
else:
# Subsequent fetches follow the raw next_uri directly.
args = self.extra_args
self.results = self._list_method(self.next_uri, *args)
# Refresh next_uri from the manager's paging state for the
# following iteration.
self.next_uri = self.manager._paging.get(
self.paging_service, {}).get("next_uri")
# We should have more results.
try:
return self.results.pop(0)
except IndexError:
raise StopIteration() | def next(self) | Return the next available item. If there are no more items in the
local 'results' list, check if there is a 'next_uri' value. If so,
use that to get the next page of results from the API, and return
the first item from that query. | 3.61004 | 3.178415 | 1.135799 |
self.username = cfg.get("rackspace_cloud", "username")
try:
self.password = cfg.get("rackspace_cloud", "api_key", raw=True)
except ConfigParser.NoOptionError as e:
# Allow either the use of either 'api_key' or 'password'.
self.password = cfg.get("rackspace_cloud", "password", raw=True) | def _read_credential_file(self, cfg) | Parses the credential file with Rackspace-specific labels. | 3.209675 | 2.884642 | 1.112677 |
# Emit Rackspace's apiKeyCredentials auth body when using an API key;
# otherwise fall back to the base class's username/password format.
if self._creds_style == "apikey":
return {"auth": {"RAX-KSKEY:apiKeyCredentials":
{"username": "%s" % self.username,
"apiKey": "%s" % self.api_key}}}
else:
# Return in the default password-style
return super(RaxIdentity, self)._format_credentials() | def _format_credentials(self) | Returns the current credentials in the format expected by the
authentication service. Note that by default Rackspace credentials
expect 'api_key' instead of 'password'. However, if authentication
fails, return the creds in standard password format, in case they are
using a username / password combination. | 5.689245 | 3.928565 | 1.448174 |
def set_credentials(self, username, password=None, region=None,
        tenant_id=None, authenticate=False):
    """
    Sets the username and password directly. Because Rackspace auth uses
    the api_key, make sure that any old values are cleared first.
    """
    # Clear any stale API key before delegating to the base class.
    self.api_key = None
    super(RaxIdentity, self).set_credentials(username, password=password,
            region=region, tenant_id=tenant_id, authenticate=authenticate)
def authenticate(self, username=None, password=None, api_key=None,
        tenant_id=None, connect=False):
    """
    If the user's credentials include an API key, the default behavior
    will work. But if they are using a password, the initial attempt
    will fail, so try again, but this time using the standard password
    format. The 'connect' parameter is retained for backwards
    compatibility. It no longer has any effect.
    """
    try:
        super(RaxIdentity, self).authenticate(username=username,
                password=password, api_key=api_key, tenant_id=tenant_id)
    except exc.AuthenticationFailed:
        # Retry once using standard password-style credentials.
        self._creds_style = "password"
        super(RaxIdentity, self).authenticate(username=username,
                password=password, api_key=api_key, tenant_id=tenant_id)
def auth_with_token(self, token, tenant_id=None, tenant_name=None):
    """
    If a valid token is already known, this call will use it to generate
    the service catalog.
    """
    # Implementation note:
    # Rackspace auth uses one tenant ID for the object_store services
    # and another for everything else. The one the user knows is the
    # 'everything else' ID, so extract the object_store tenant ID from
    # the initial response, call the token auth a second time with that
    # tenant ID, merge the resulting object_store endpoints into the
    # primary catalog, and then continue with the auth process.
    main_resp, main_body = self._call_token_auth(token, tenant_id,
            tenant_name)
    # Find the swift (object-store) tenant ID, if any.
    roles = main_body["access"]["user"]["roles"]
    ostore_roles = [role for role in roles
            if role["name"] == "object-store:default"]
    if ostore_roles:
        swift_tenant_id = ostore_roles[0]["tenantId"]
        ostore_resp, ostore_body = self._call_token_auth(token,
                swift_tenant_id, None)
        main_body["access"]["serviceCatalog"].extend(
                ostore_body["access"]["serviceCatalog"])
    self._parse_response(main_body)
    self.authenticated = True
def _parse_response(self, resp):
    """Gets the authentication information from the returned JSON."""
    super(RaxIdentity, self)._parse_response(resp)
    # Rackspace adds a default-region extension to the user record.
    default_region = resp["access"]["user"].get("RAX-AUTH:defaultRegion")
    if default_region:
        self._default_region = default_region
def get_client(self, service, region, public=True, cached=True):
    """
    Returns the client object for the specified service and region.

    By default the public endpoint is used. If you wish to work with a
    service's internal endpoints, specify `public=False`. By default, if
    a client has already been created for the given service, region, and
    public values, that will be returned. To force a new client to be
    created, pass 'cached=False'.
    """
    client_class = None
    # Cloud Networks currently uses nova-networks, so it doesn't appear
    # as a separate entry in the service catalog. This hack allows
    # context objects to keep working with Rackspace Cloud Networks;
    # remove it once the Neutron service is implemented.
    network_aliases = ("compute:networks", "networks", "network",
            "cloudnetworks", "cloud_networks")
    if service in network_aliases:
        service = "compute"
        client_class = CloudNetworkClient
    return super(RaxIdentity, self).get_client(service, region,
            public=public, cached=cached, client_class=client_class)
def get_user(self, user_id=None, username=None, email=None):
    """
    Returns the user specified by either ID, username or email.

    Since more than one user can have the same email address, searching
    by that term will return a list of 1 or more User objects. Searching
    by username or ID will return a single User. If a user_id that
    doesn't belong to the current account is searched for, a Forbidden
    exception is raised. When searching by username or email, a NotFound
    exception is raised if there is no matching user.
    """
    if user_id:
        uri = "/users/%s" % user_id
    elif username:
        uri = "/users?name=%s" % username
    elif email:
        uri = "/users?email=%s" % email
    else:
        raise ValueError("You must include one of 'user_id', "
                "'username', or 'email' when calling get_user().")
    resp, resp_body = self.method_get(uri)
    if resp.status_code == 404:
        raise exc.NotFound("No such user exists.")
    users = resp_body.get("users", [])
    if users:
        # Email searches can match several accounts.
        return [User(self, user) for user in users]
    user = resp_body.get("user", {})
    if not user:
        raise exc.NotFound("No such user exists.")
    return User(self, user)
def update_user(self, user, email=None, username=None,
        uid=None, defaultRegion=None, enabled=None):
    """Allows you to update settings for a given user."""
    user_id = utils.get_id(user)
    uri = "users/%s" % user_id
    upd = {"id": user_id}
    # Only include the attributes the caller actually supplied.
    for key, val in (("email", email),
            ("RAX-AUTH:defaultRegion", defaultRegion),
            ("username", username),
            ("enabled", enabled)):
        if val is not None:
            upd[key] = val
    resp, resp_body = self.method_put(uri, data={"user": upd})
    if resp.status_code in (401, 403, 404):
        raise exc.AuthorizationFailure("You are not authorized to update "
                "users.")
    return User(self, resp_body)
def reset_api_key(self, user=None):
    """
    Resets the API key for the specified user, or if no user is
    specified, for the current user. Returns the newly-created API key.

    Resetting an API key does not invalidate any authenticated sessions,
    nor does it revoke any tokens.
    """
    target = self if user is None else user
    uri = ("users/%s/OS-KSADM/credentials/"
            "RAX-KSKEY:apiKeyCredentials/RAX-AUTH/reset"
            % utils.get_id(target))
    resp, resp_body = self.method_post(uri)
    return resp_body.get("RAX-KSKEY:apiKeyCredentials", {}).get("apiKey")
def _make_policies(self):
    """
    Convert the 'scalingPolicies' dictionaries into AutoScalePolicy
    objects.
    """
    self.policies = [AutoScalePolicy(self.manager, pol, self)
            for pol in self.scalingPolicies]
def update(self, name=None, cooldown=None, min_entities=None,
        max_entities=None, metadata=None):
    """
    Updates this ScalingGroup. One or more of the attributes can be
    specified.

    NOTE: if you specify metadata, it will *replace* any existing
    metadata. If you want to add to it, you either need to pass the
    complete dict of metadata, or call the update_metadata() method.
    """
    return self.manager.update(self, name=name, cooldown=cooldown,
            min_entities=min_entities, max_entities=max_entities,
            metadata=metadata)
def add_policy(self, name, policy_type, cooldown, change=None,
        is_percent=False, desired_capacity=None, args=None):
    """
    Adds a policy with the given values to this scaling group. The
    'change' parameter is treated as an absolute amount, unless
    'is_percent' is True, in which case it is treated as a percentage.
    """
    return self.manager.add_policy(self, name, policy_type, cooldown,
            change=change, is_percent=is_percent,
            desired_capacity=desired_capacity, args=args)
def execute_policy(self, policy):
    """Executes the specified policy for this scaling group."""
    return self.manager.execute_policy(scaling_group=self, policy=policy)
def delete_policy(self, policy):
    """Deletes the specified policy from this scaling group."""
    return self.manager.delete_policy(scaling_group=self, policy=policy)
def add_webhook(self, policy, name, metadata=None):
    """Adds a webhook to the specified policy."""
    return self.manager.add_webhook(self, policy, name, metadata=metadata)
def update_webhook_metadata(self, policy, webhook, metadata):
    """
    Adds the given metadata dict to the existing metadata for the
    specified webhook.
    """
    return self.manager.update_webhook_metadata(self, policy, webhook,
            metadata)
def delete_webhook(self, policy, webhook):
    """Deletes the specified webhook from the specified policy."""
    return self.manager.delete_webhook(self, policy, webhook)
uri = "/%s/%s/state" % (self.uri_base, utils.get_id(scaling_group))
resp, resp_body = self.api.method_get(uri)
data = resp_body["group"]
ret = {}
ret["active"] = [itm["id"] for itm in data["active"]]
ret["active_capacity"] = data["activeCapacity"]
ret["desired_capacity"] = data["desiredCapacity"]
ret["pending_capacity"] = data["pendingCapacity"]
ret["paused"] = data["paused"]
return ret | def get_state(self, scaling_group) | Returns the current state of the specified scaling group as a
dictionary. | 3.118223 | 2.930235 | 1.064154 |
uri = "/%s/%s/pause" % (self.uri_base, utils.get_id(scaling_group))
resp, resp_body = self.api.method_post(uri)
return None | def pause(self, scaling_group) | Pauses all execution of the policies for the specified scaling group. | 4.806551 | 4.611275 | 1.042348 |
uri = "/%s/%s/config" % (self.uri_base, utils.get_id(scaling_group))
resp, resp_body = self.api.method_get(uri)
return resp_body.get("groupConfiguration") | def get_configuration(self, scaling_group) | Returns the scaling group's configuration in a dictionary. | 4.702522 | 4.050481 | 1.160979 |
def replace(self, scaling_group, name, cooldown, min_entities,
        max_entities, metadata=None):
    """
    Replace an existing ScalingGroup configuration. All of the
    attributes must be specified. If you wish to delete any of the
    optional attributes, pass them in as None.
    """
    uri = "/%s/%s/config" % (self.uri_base, utils.get_id(scaling_group))
    body = self._create_group_config_body(name, cooldown, min_entities,
            max_entities, metadata=metadata)
    resp, resp_body = self.api.method_put(uri, body=body)
def update(self, scaling_group, name=None, cooldown=None,
        min_entities=None, max_entities=None, metadata=None):
    """
    Updates an existing ScalingGroup. One or more of the attributes can
    be specified.

    NOTE: if you specify metadata, it will *replace* any existing
    metadata. If you want to add to it, you either need to pass the
    complete dict of metadata, or call the update_metadata() method.
    """
    if not isinstance(scaling_group, ScalingGroup):
        scaling_group = self.get(scaling_group)
    uri = "/%s/%s/config" % (self.uri_base, scaling_group.id)
    # Fall back to the group's current values for anything unspecified.
    body = {"name": name or scaling_group.name,
            "cooldown": scaling_group.cooldown if cooldown is None
                    else cooldown,
            "minEntities": scaling_group.min_entities
                    if min_entities is None else min_entities,
            "maxEntities": scaling_group.max_entities
                    if max_entities is None else max_entities,
            "metadata": metadata or scaling_group.metadata,
            }
    resp, resp_body = self.api.method_put(uri, body=body)
    return None
def update_metadata(self, scaling_group, metadata):
    """
    Adds the given metadata dict to the existing metadata for the
    scaling group.
    """
    if not isinstance(scaling_group, ScalingGroup):
        scaling_group = self.get(scaling_group)
    merged = scaling_group.metadata
    merged.update(metadata)
    return self.update(scaling_group, metadata=merged)
def get_launch_config(self, scaling_group):
    """
    Returns the launch configuration for the specified scaling group.
    """
    # Map API key names onto the pyrax-style names callers expect.
    key_map = {
        "OS-DCF:diskConfig": "disk_config",
        "flavorRef": "flavor",
        "imageRef": "image",
    }
    uri = "/%s/%s/launch" % (self.uri_base, utils.get_id(scaling_group))
    resp, resp_body = self.api.method_get(uri)
    data = resp_body.get("launchConfiguration")
    args = data.get("args", {})
    ret = {"type": data.get("type"),
            "load_balancers": args.get("loadBalancers")}
    for key, value in args.get("server", {}).items():
        ret[key_map.get(key, key)] = value
    return ret
def replace_launch_config(self, scaling_group, launch_config_type,
        server_name, image, flavor, disk_config=None, metadata=None,
        personality=None, networks=None, load_balancers=None,
        key_name=None, config_drive=False, user_data=None):
    """
    Replace an existing launch configuration. All of the attributes must
    be specified. If you wish to delete any of the optional attributes,
    pass them in as None.
    """
    uri = "/%s/%s/launch" % (self.uri_base, utils.get_id(scaling_group))
    body = self._create_launch_config_body(
            launch_config_type=launch_config_type,
            server_name=server_name, image=image, flavor=flavor,
            disk_config=disk_config, metadata=metadata,
            personality=personality, networks=networks,
            load_balancers=load_balancers, key_name=key_name,
            config_drive=config_drive, user_data=user_data)
    resp, resp_body = self.api.method_put(uri, body=body)
def update_launch_config(self, scaling_group, server_name=None, image=None,
        flavor=None, disk_config=None, metadata=None, personality=None,
        networks=None, load_balancers=None, key_name=None,
        config_drive=False, user_data=None):
    """
    Updates the server launch configuration for an existing scaling
    group. One or more of the available attributes can be specified.

    NOTE: if you specify metadata, it will *replace* any existing
    metadata. If you want to add to it, you either need to pass the
    complete dict of metadata, or call the update_launch_metadata()
    method.
    """
    if not isinstance(scaling_group, ScalingGroup):
        scaling_group = self.get(scaling_group)
    uri = "/%s/%s/launch" % (self.uri_base, scaling_group.id)
    # Existing launch settings provide the defaults for anything the
    # caller did not supply.
    launch_args = scaling_group.launchConfiguration.get("args", {})
    server_args = launch_args.get("server", {})
    lb_args = launch_args.get("loadBalancers", {})
    if personality is None:
        personality = server_args.get("personality", [])
    if user_data:
        user_data = base64.b64encode(user_data)
    body = {"type": "launch_server",
            "args": {
                "server": {
                    "name": server_name or server_args.get("name"),
                    "imageRef": image or server_args.get("imageRef"),
                    "flavorRef": flavor or server_args.get("flavorRef"),
                    "OS-DCF:diskConfig": disk_config
                            or server_args.get("OS-DCF:diskConfig",
                                    "AUTO"),
                    "networks": networks or server_args.get("networks"),
                },
                "loadBalancers": load_balancers or lb_args,
            },
    }
    server_body = body["args"]["server"]
    # Optional settings are only included when they have a value.
    cfg_drive = config_drive or server_args.get("config_drive")
    if cfg_drive:
        server_body["config_drive"] = cfg_drive
    usr_data = user_data or server_args.get("user_data")
    if usr_data:
        server_body["user_data"] = usr_data
    if personality:
        server_body["personality"] = self._encode_personality(personality)
    meta = metadata or server_args.get("metadata")
    if meta:
        server_body["metadata"] = meta
    key = key_name or server_args.get("key_name")
    if key:
        server_body["key_name"] = key
    resp, resp_body = self.api.method_put(uri, body=body)
    return None
def update_launch_metadata(self, scaling_group, metadata):
    """
    Adds the given metadata dict to the existing metadata for the
    scaling group's launch configuration.
    """
    if not isinstance(scaling_group, ScalingGroup):
        scaling_group = self.get(scaling_group)
    merged = scaling_group.launchConfiguration.get("args", {}).get(
            "server", {}).get("metadata", {})
    merged.update(metadata)
    return self.update_launch_config(scaling_group, metadata=merged)
uri = "/%s/%s/policies" % (self.uri_base, utils.get_id(scaling_group))
body = self._create_policy_body(name, policy_type, cooldown,
change=change, is_percent=is_percent,
desired_capacity=desired_capacity, args=args)
# "body" needs to be a list
body = [body]
resp, resp_body = self.api.method_post(uri, body=body)
pol_info = resp_body.get("policies")[0]
return AutoScalePolicy(self, pol_info, scaling_group) | def add_policy(self, scaling_group, name, policy_type, cooldown,
change=None, is_percent=False, desired_capacity=None, args=None) | Adds a policy with the given values to the specified scaling group. The
'change' parameter is treated as an absolute amount, unless
'is_percent' is True, in which case it is treated as a percentage. | 2.791826 | 2.79162 | 1.000074 |
uri = "/%s/%s/policies" % (self.uri_base, utils.get_id(scaling_group))
resp, resp_body = self.api.method_get(uri)
return [AutoScalePolicy(self, data, scaling_group)
for data in resp_body.get("policies", [])] | def list_policies(self, scaling_group) | Returns a list of all policies defined for the specified scaling group. | 3.855747 | 3.606519 | 1.069105 |
def replace_policy(self, scaling_group, policy, name,
        policy_type, cooldown, change=None, is_percent=False,
        desired_capacity=None, args=None):
    """
    Replace an existing policy. All of the attributes must be specified.
    If you wish to delete any of the optional attributes, pass them in
    as None.
    """
    uri = "/%s/%s/policies/%s" % (self.uri_base,
            utils.get_id(scaling_group), utils.get_id(policy))
    body = self._create_policy_body(name=name, policy_type=policy_type,
            cooldown=cooldown, change=change, is_percent=is_percent,
            desired_capacity=desired_capacity, args=args)
    resp, resp_body = self.api.method_put(uri, body=body)
uri = "/%s/%s/policies/%s" % (self.uri_base,
utils.get_id(scaling_group), utils.get_id(policy))
if not isinstance(policy, AutoScalePolicy):
# Received an ID
policy = self.get_policy(scaling_group, policy)
body = {"name": name or policy.name,
"type": policy_type or policy.type,
"cooldown": cooldown or policy.cooldown,
}
if desired_capacity is not None:
body["desiredCapacity"] = desired_capacity
elif change is not None:
if is_percent:
body["changePercent"] = change
else:
body["change"] = change
else:
if getattr(policy, "changePercent", None) is not None:
body["changePercent"] = policy.changePercent
elif getattr(policy, "change", None) is not None:
body["change"] = policy.change
elif getattr(policy, "desiredCapacity", None) is not None:
body["desiredCapacity"] = policy.desiredCapacity
args = args or getattr(policy, "args", None)
if args is not None:
body["args"] = args
resp, resp_body = self.api.method_put(uri, body=body)
return None | def update_policy(self, scaling_group, policy, name=None, policy_type=None,
cooldown=None, change=None, is_percent=False,
desired_capacity=None, args=None) | Updates the specified policy. One or more of the parameters may be
specified. | 1.948509 | 1.987108 | 0.980575 |
uri = "/%s/%s/policies/%s/execute" % (self.uri_base,
utils.get_id(scaling_group), utils.get_id(policy))
resp, resp_body = self.api.method_post(uri)
return None | def execute_policy(self, scaling_group, policy) | Executes the specified policy for this scaling group. | 3.948328 | 3.664574 | 1.077432 |
uri = "/%s/%s/policies/%s" % (self.uri_base,
utils.get_id(scaling_group), utils.get_id(policy))
resp, resp_body = self.api.method_delete(uri) | def delete_policy(self, scaling_group, policy) | Deletes the specified policy from the scaling group. | 3.297832 | 3.169999 | 1.040326 |
uri = "/%s/%s/policies/%s/webhooks" % (self.uri_base,
utils.get_id(scaling_group), utils.get_id(policy))
body = self._create_webhook_body(name, metadata=metadata)
# "body" needs to be a list
body = [body]
resp, resp_body = self.api.method_post(uri, body=body)
data = resp_body.get("webhooks")[0]
return AutoScaleWebhook(self, data, policy, scaling_group) | def add_webhook(self, scaling_group, policy, name, metadata=None) | Adds a webhook to the specified policy. | 3.171602 | 3.226175 | 0.983084 |
uri = "/%s/%s/policies/%s/webhooks" % (self.uri_base,
utils.get_id(scaling_group), utils.get_id(policy))
resp, resp_body = self.api.method_get(uri)
return [AutoScaleWebhook(self, data, policy, scaling_group)
for data in resp_body.get("webhooks", [])] | def list_webhooks(self, scaling_group, policy) | Returns a list of all webhooks for the specified policy. | 3.110342 | 3.050466 | 1.019628 |
uri = "/%s/%s/policies/%s/webhooks/%s" % (self.uri_base,
utils.get_id(scaling_group), utils.get_id(policy),
utils.get_id(webhook))
group_id = utils.get_id(scaling_group)
policy_id = utils.get_id(policy)
webhook_id = utils.get_id(webhook)
body = self._create_webhook_body(name, metadata=metadata)
resp, resp_body = self.api.method_put(uri, body=body) | def replace_webhook(self, scaling_group, policy, webhook, name,
metadata=None) | Replace an existing webhook. All of the attributes must be specified.
If you wish to delete any of the optional attributes, pass them in as
None. | 2.095056 | 2.127296 | 0.984845 |
uri = "/%s/%s/policies/%s/webhooks/%s" % (self.uri_base,
utils.get_id(scaling_group), utils.get_id(policy),
utils.get_id(webhook))
if not isinstance(webhook, AutoScaleWebhook):
# Received an ID
webhook = self.get_webhook(scaling_group, policy, webhook)
body = {"name": name or webhook.name,
"metadata": metadata or webhook.metadata,
}
resp, resp_body = self.api.method_put(uri, body=body)
webhook.reload()
return webhook | def update_webhook(self, scaling_group, policy, webhook, name=None,
metadata=None) | Updates the specified webhook. One or more of the parameters may be
specified. | 2.72615 | 2.902948 | 0.939097 |
def update_webhook_metadata(self, scaling_group, policy, webhook, metadata):
    """
    Adds the given metadata dict to the existing metadata for the
    specified webhook.
    """
    if not isinstance(webhook, AutoScaleWebhook):
        webhook = self.get_webhook(scaling_group, policy, webhook)
    merged = webhook.metadata or {}
    merged.update(metadata)
    return self.update_webhook(scaling_group, policy, webhook,
            metadata=merged)
uri = "/%s/%s/policies/%s/webhooks/%s" % (self.uri_base,
utils.get_id(scaling_group), utils.get_id(policy),
utils.get_id(webhook))
resp, resp_body = self.api.method_delete(uri)
return None | def delete_webhook(self, scaling_group, policy, webhook) | Deletes the specified webhook from the specified policy. | 2.761866 | 2.818264 | 0.979989 |
lb_args = []
if not isinstance(load_balancers, list):
lbs = [load_balancers]
else:
lbs = load_balancers
for lb in lbs:
if isinstance(lb, dict):
lb_args.append(lb)
elif isinstance(lb, CloudLoadBalancer):
lb_args.append({
"loadBalancerId": lb.id,
"port": lb.port,
})
elif isinstance(lb, tuple):
lb_args.append({"loadBalancerId": lb[0],
"port": lb[1]})
else:
# See if it's an ID for a Load Balancer
try:
instance = pyrax.cloud_loadbalancers.get(lb)
except Exception:
raise exc.InvalidLoadBalancer("Received an invalid "
"specification for a Load Balancer: '%s'" % lb)
lb_args.append({
"loadBalancerId": instance.id,
"port": instance.port,
})
return lb_args | def _resolve_lbs(load_balancers) | Takes either a single LB reference or a list of references and returns
the dictionary required for creating a Scaling Group.
References can be either a dict that matches the structure required by
the autoscale API, a CloudLoadBalancer instance, or the ID of the load
balancer. | 2.368604 | 2.223725 | 1.065152 |
def _encode_personality(self, personality):
    """
    Personality files must be base64-encoded before transmitting.
    """
    if personality is None:
        personality = []
    else:
        personality = utils.coerce_to_list(personality)
    for pfile in personality:
        if "contents" in pfile:
            # Encode in place; the API requires base64 file contents.
            pfile["contents"] = base64.b64encode(pfile["contents"])
    return personality
def _create_body(self, name, cooldown, min_entities, max_entities,
        launch_config_type, server_name, image, flavor, disk_config=None,
        metadata=None, personality=None, networks=None,
        load_balancers=None, scaling_policies=None, group_metadata=None,
        key_name=None, config_drive=False, user_data=None):
    """
    Used to create the dict required to create any of the following:
    A Scaling Group
    """
    if metadata is None:
        metadata = {}
    if scaling_policies is None:
        scaling_policies = []
    return {
        "groupConfiguration": self._create_group_config_body(name,
                cooldown, min_entities, max_entities,
                metadata=group_metadata),
        "launchConfiguration": self._create_launch_config_body(
                launch_config_type, server_name, image, flavor,
                disk_config=disk_config, metadata=metadata,
                personality=personality, networks=networks,
                load_balancers=load_balancers, key_name=key_name,
                config_drive=config_drive, user_data=user_data),
        "scalingPolicies": scaling_policies,
    }
def add_webhook(self, name, metadata=None):
    """Adds a webhook to this policy."""
    return self.manager.add_webhook(self.scaling_group, self, name,
            metadata=metadata)
def get_webhook(self, webhook):
    """Gets the detail for the specified webhook."""
    return self.manager.get_webhook(self.scaling_group, self, webhook)
def update_webhook(self, webhook, name=None, metadata=None):
    """
    Updates the specified webhook. One or more of the parameters may be
    specified.
    """
    return self.manager.update_webhook(self.scaling_group, policy=self,
            webhook=webhook, name=name, metadata=metadata)
def update_webhook_metadata(self, webhook, metadata):
    """
    Adds the given metadata dict to the existing metadata for the
    specified webhook.
    """
    return self.manager.update_webhook_metadata(self.scaling_group, self,
            webhook, metadata)
def delete_webhook(self, webhook):
    """Deletes the specified webhook from this policy."""
    return self.manager.delete_webhook(self.scaling_group, self, webhook)
def update(self, name=None, metadata=None):
    """
    Updates this webhook. One or more of the parameters may be
    specified.
    """
    return self.policy.update_webhook(self, name=name, metadata=metadata)
def _configure_manager(self):
    """Creates a manager to handle autoscale operations."""
    self._manager = ScalingGroupManager(self, uri_base="groups",
            resource_class=ScalingGroup, response_key="group")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.