| sentence1 (string, 52–3.87M chars) | sentence2 (string, 1–47.2k chars) | label (1 class) |
|---|---|---|
def get_tags(self):
"""List all tags as Tag objects."""
res = self.get_request('/tag')
return [Tag(cloud_manager=self, **tag) for tag in res['tags']['tag']]
|
List all tags as Tag objects.
|
entailment
|
def get_tag(self, name):
"""Return the tag as Tag object."""
res = self.get_request('/tag/' + name)
return Tag(cloud_manager=self, **res['tag'])
|
Return the tag as Tag object.
|
entailment
|
def create_tag(self, name, description=None, servers=[]):
"""
Create a new Tag. Only name is mandatory.
Returns the created Tag object.
"""
servers = [str(server) for server in servers]
body = {'tag': Tag(name, description, servers).to_dict()}
res = self.request('POST', '/tag', body)
return Tag(cloud_manager=self, **res['tag'])
|
Create a new Tag. Only name is mandatory.
Returns the created Tag object.
|
entailment
|
def _modify_tag(self, name, description, servers, new_name):
"""
PUT /tag/name. Returns a dict that can be used to create a Tag object.
Private method used by the Tag class and TagManager.modify_tag.
"""
body = {'tag': Tag(new_name, description, servers).to_dict()}
res = self.request('PUT', '/tag/' + name, body)
return res['tag']
|
PUT /tag/name. Returns a dict that can be used to create a Tag object.
Private method used by the Tag class and TagManager.modify_tag.
|
entailment
|
def modify_tag(self, name, description=None, servers=None, new_name=None):
"""
PUT /tag/name. Returns a new Tag object based on the API response.
"""
res = self._modify_tag(name, description, servers, new_name)
return Tag(cloud_manager=self, **res)
|
PUT /tag/name. Returns a new Tag object based on the API response.
|
entailment
|
def remove_tags(self, server, tags):
"""
Remove tags from a server.
- server: Server object or UUID string
- tags: list of Tag objects or strings
"""
uuid = str(server)
tags = [str(tag) for tag in tags]
url = '/server/{0}/untag/{1}'.format(uuid, ','.join(tags))
return self.post_request(url)
|
Remove tags from a server.
- server: Server object or UUID string
- tags: list of Tag objects or strings
|
entailment
|
def assignIfExists(opts, default=None, **kwargs):
"""
Helper for assigning object attributes from API responses.
"""
for opt in opts:
if opt in kwargs:
return kwargs[opt]
return default
|
Helper for assigning object attributes from API responses.
|
entailment
|
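A hypothetical usage sketch for `assignIfExists`: picking the first matching key out of an API response dict. The field names below are invented for illustration.

```python
# Hypothetical response fields; assignIfExists is defined above.
api_response = {'storage_title': 'OS disk', 'storage_size': 10}

title = assignIfExists(['title', 'storage_title'], default='untitled', **api_response)
size = assignIfExists(['size', 'storage_size'], default=0, **api_response)
# title == 'OS disk', size == 10
```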
def try_it_n_times(operation, expected_error_codes, custom_error='operation failed', n=10):
"""
Try a given operation (API call) n times.
Raises if the API call fails with an error_code that is not expected.
Raises if the API call has not succeeded within n attempts.
Waits 3 seconds between each attempt.
"""
for i in itertools.count():
try:
operation()
break
except UpCloudAPIError as e:
if e.error_code not in expected_error_codes:
raise e
sleep(3)
if i >= n - 1:
raise UpCloudClientError(custom_error)
|
Try a given operation (API call) n times.
Raises if the API call fails with an error_code that is not expected.
Raises if the API call has not succeeded within n attempts.
Waits 3 seconds between each attempt.
|
entailment
|
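A self-contained sketch of the same retry pattern, with a stand-in exception class since `UpCloudAPIError` and `UpCloudClientError` are not defined in this snippet.

```python
import itertools
from time import sleep

class APIError(Exception):
    """Stand-in for UpCloudAPIError; carries an error_code."""
    def __init__(self, error_code):
        super().__init__(error_code)
        self.error_code = error_code

def try_n_times(operation, expected_error_codes, n=10, wait=3):
    """Retry operation up to n times, re-raising unexpected error codes."""
    for i in itertools.count():
        try:
            return operation()
        except APIError as e:
            if e.error_code not in expected_error_codes:
                raise
            if i >= n - 1:
                raise RuntimeError('operation failed after {} attempts'.format(n))
            sleep(wait)
```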
def hkdf_extract(salt, input_key_material, hash=hashlib.sha512):
'''
Extract a pseudorandom key suitable for use with hkdf_expand
from the input_key_material and a salt using HMAC with the
provided hash (default SHA-512).
salt should be a random, application-specific byte string. If
salt is None or the empty string, an all-zeros string of the same
length as the hash's digest size will be used instead, per the RFC.
See the HKDF draft RFC and paper for usage notes.
'''
hash_len = hash().digest_size
if salt is None or len(salt) == 0:
salt = bytearray((0,) * hash_len)
return hmac.new(bytes(salt), buffer(input_key_material), hash).digest()
|
Extract a pseudorandom key suitable for use with hkdf_expand
from the input_key_material and a salt using HMAC with the
provided hash (default SHA-512).
salt should be a random, application-specific byte string. If
salt is None or the empty string, an all-zeros string of the same
length as the hash's digest size will be used instead, per the RFC.
See the HKDF draft RFC and paper for usage notes.
|
entailment
|
def hkdf_expand(pseudo_random_key, info=b"", length=32, hash=hashlib.sha512):
'''
Expand `pseudo_random_key` and `info` into a key of `length` bytes using
HKDF's expand function based on HMAC with the provided hash (default
SHA-512). See the HKDF draft RFC and paper for usage notes.
'''
hash_len = hash().digest_size
length = int(length)
if length > 255 * hash_len:
raise Exception("Cannot expand to more than 255 * %d = %d bytes using the specified hash function" %\
(hash_len, 255 * hash_len))
blocks_needed = length // hash_len + (0 if length % hash_len == 0 else 1) # ceil
okm = b""
output_block = b""
for counter in range(blocks_needed):
output_block = hmac.new(pseudo_random_key, buffer(output_block + info + bytearray((counter + 1,))),\
hash).digest()
okm += output_block
return okm[:length]
|
Expand `pseudo_random_key` and `info` into a key of `length` bytes using
HKDF's expand function based on HMAC with the provided hash (default
SHA-512). See the HKDF draft RFC and paper for usage notes.
|
entailment
|
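The two HKDF helpers above are Python 2 code (`buffer` does not exist in Python 3). Below is a minimal Python 3 rendering of the same extract/expand pair, sketched per RFC 5869; the salt and info values are placeholders.

```python
import hashlib
import hmac

def hkdf_extract(salt, ikm, hash=hashlib.sha512):
    # RFC 5869: a missing salt defaults to HashLen zero bytes.
    if not salt:
        salt = b'\x00' * hash().digest_size
    return hmac.new(salt, ikm, hash).digest()

def hkdf_expand(prk, info=b'', length=32, hash=hashlib.sha512):
    hash_len = hash().digest_size
    if length > 255 * hash_len:
        raise ValueError('length too large for the chosen hash')
    okm, block = b'', b''
    for counter in range(1, -(-length // hash_len) + 1):  # ceil(length / hash_len) blocks
        block = hmac.new(prk, block + info + bytes([counter]), hash).digest()
        okm += block
    return okm[:length]

prk = hkdf_extract(b'application-salt', b'input key material')   # placeholder inputs
okm = hkdf_expand(prk, info=b'session-key', length=64)
```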
def expand(self, info=b"", length=32):
'''
Generate output key material based on an `info` value
Arguments:
- info - context to generate the OKM
- length - length in bytes of the key to generate
See the HKDF draft RFC for guidance.
'''
return hkdf_expand(self._prk, info, length, self._hash)
|
Generate output key material based on an `info` value
Arguments:
- info - context to generate the OKM
- length - length in bytes of the key to generate
See the HKDF draft RFC for guidance.
|
entailment
|
def login_user_block(username, ssh_keys, create_password=True):
"""
Helper function for creating Server.login_user blocks.
(see: https://www.upcloud.com/api/8-servers/#create-server)
"""
block = {
'create_password': 'yes' if create_password is True else 'no',
'ssh_keys': {
'ssh_key': ssh_keys
}
}
if username:
block['username'] = username
return block
|
Helper function for creating Server.login_user blocks.
(see: https://www.upcloud.com/api/8-servers/#create-server)
|
entailment
|
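A usage sketch for `login_user_block`; the username and SSH key below are placeholders.

```python
login_user = login_user_block(
    username='upclouduser',                         # placeholder
    ssh_keys=['ssh-rsa AAAA... user@example.com'],  # placeholder public key
    create_password=False
)
# => {'create_password': 'no',
#     'ssh_keys': {'ssh_key': ['ssh-rsa AAAA... user@example.com']},
#     'username': 'upclouduser'}
```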
def _reset(self, server, **kwargs):
"""
Reset the server object with new values given as params.
- server: a dict representing the server, e.g. the API response.
- kwargs: any meta fields such as cloud_manager and populated.
Note: storage_devices and ip_addresses may be given in server as dicts or
in kwargs as lists containing Storage and IPAddress objects.
"""
if server:
# handle storage, ip_address dicts and tags if they exist
Server._handle_server_subobjs(server, kwargs.get('cloud_manager'))
for key in server:
object.__setattr__(self, key, server[key])
for key in kwargs:
object.__setattr__(self, key, kwargs[key])
|
Reset the server object with new values given as params.
- server: a dict representing the server, e.g. the API response.
- kwargs: any meta fields such as cloud_manager and populated.
Note: storage_devices and ip_addresses may be given in server as dicts or
in kwargs as lists containing Storage and IPAddress objects.
|
entailment
|
def populate(self):
"""
Sync changes from the API to the local object.
Note: syncs ip_addresses and storage_devices too (/server/uuid endpoint)
"""
server, IPAddresses, storages = self.cloud_manager.get_server_data(self.uuid)
self._reset(
server,
ip_addresses=IPAddresses,
storage_devices=storages,
populated=True
)
return self
|
Sync changes from the API to the local object.
Note: syncs ip_addresses and storage_devices too (/server/uuid endpoint)
|
entailment
|
def save(self):
"""
Sync local changes in server's attributes to the API.
Note: DOES NOT sync IPAddresses and storage_devices,
use add_ip, add_storage, remove_ip, remove_storage instead.
"""
# dict comprehension that also works with 2.6
# http://stackoverflow.com/questions/21069668/alternative-to-dict-comprehension-prior-to-python-2-7
kwargs = dict(
(field, getattr(self, field))
for field in self.updateable_fields
if hasattr(self, field)
)
self.cloud_manager.modify_server(self.uuid, **kwargs)
self._reset(kwargs)
|
Sync local changes in server's attributes to the API.
Note: DOES NOT sync IPAddresses and storage_devices,
use add_ip, add_storage, remove_ip, remove_storage instead.
|
entailment
|
def shutdown(self, hard=False, timeout=30):
"""
Shutdown/stop the server. By default, issue a soft shutdown with a timeout of 30s.
After the timeout, a hard shutdown is performed if the server has not stopped.
Note: API responds immediately (unlike in start), with state: started.
This client will, however, set state as 'maintenance' to signal that the server is neither
started nor stopped.
"""
body = dict()
body['stop_server'] = {
'stop_type': 'hard' if hard else 'soft',
'timeout': '{0}'.format(timeout)
}
path = '/server/{0}/stop'.format(self.uuid)
self.cloud_manager.post_request(path, body)
object.__setattr__(self, 'state', 'maintenance')
|
Shutdown/stop the server. By default, issue a soft shutdown with a timeout of 30s.
After the timeout, a hard shutdown is performed if the server has not stopped.
Note: API responds immediately (unlike in start), with state: started.
This client will, however, set state as 'maintenance' to signal that the server is neither
started nor stopped.
|
entailment
|
def start(self, timeout=120):
"""
Start the server. Note: slow and blocking request.
The API waits for confirmation from UpCloud's IaaS backend before responding.
"""
path = '/server/{0}/start'.format(self.uuid)
self.cloud_manager.post_request(path, timeout=timeout)
object.__setattr__(self, 'state', 'started')
|
Start the server. Note: slow and blocking request.
The API waits for confirmation from UpCloud's IaaS backend before responding.
|
entailment
|
def restart(self, hard=False, timeout=30, force=True):
"""
Restart the server. By default, issue a soft restart with a timeout of 30s;
after the timeout, a hard restart is performed if the server has not stopped.
Note: API responds immediately (unlike in start), with state: started.
This client will, however, set state as 'maintenance' to signal that the server is neither
started nor stopped.
"""
body = dict()
body['restart_server'] = {
'stop_type': 'hard' if hard else 'soft',
'timeout': '{0}'.format(timeout),
'timeout_action': 'destroy' if force else 'ignore'
}
path = '/server/{0}/restart'.format(self.uuid)
self.cloud_manager.post_request(path, body)
object.__setattr__(self, 'state', 'maintenance')
|
Restart the server. By default, issue a soft restart with a timeout of 30s;
after the timeout, a hard restart is performed if the server has not stopped.
Note: API responds immediately (unlike in start), with state: started.
This client will, however, set state as 'maintenance' to signal that the server is neither
started nor stopped.
|
entailment
|
def add_ip(self, family='IPv4'):
"""
Allocate a new (random) IP-address to the Server.
"""
IP = self.cloud_manager.attach_ip(self.uuid, family)
self.ip_addresses.append(IP)
return IP
|
Allocate a new (random) IP-address to the Server.
|
entailment
|
def remove_ip(self, IPAddress):
"""
Release the specified IP-address from the server.
"""
self.cloud_manager.release_ip(IPAddress.address)
self.ip_addresses.remove(IPAddress)
|
Release the specified IP-address from the server.
|
entailment
|
def add_storage(self, storage=None, type='disk', address=None):
"""
Attach the given storage to the Server.
Default address is next available.
"""
self.cloud_manager.attach_storage(server=self.uuid,
storage=storage.uuid,
storage_type=type,
address=address)
storage.address = address
storage.type = type
self.storage_devices.append(storage)
|
Attach the given storage to the Server.
Default address is next available.
|
entailment
|
def remove_storage(self, storage):
"""
Remove Storage from a Server.
The Storage must be a reference to an object in
Server.storage_devices or the method will throw an Exception.
A Storage from get_storage(uuid) will not work as it is missing the 'address' property.
"""
if not hasattr(storage, 'address'):
raise Exception(
('Storage does not have an address. '
'Access the Storage via Server.storage_devices '
'so they include an address. '
'(This is due to how the API handles Storages)')
)
self.cloud_manager.detach_storage(server=self.uuid, address=storage.address)
self.storage_devices.remove(storage)
|
Remove Storage from a Server.
The Storage must be a reference to an object in
Server.storage_devices or the method will throw an Exception.
A Storage from get_storage(uuid) will not work as it is missing the 'address' property.
|
entailment
|
def add_tags(self, tags):
"""
Add tags to a server. Accepts tags as strings or Tag objects.
"""
if self.cloud_manager.assign_tags(self.uuid, tags):
tags = self.tags + [str(tag) for tag in tags]
object.__setattr__(self, 'tags', tags)
|
Add tags to a server. Accepts tags as strings or Tag objects.
|
entailment
|
def remove_tags(self, tags):
"""
Remove tags from a server. Accepts tags as strings or Tag objects.
"""
if self.cloud_manager.remove_tags(self, tags):
new_tags = [tag for tag in self.tags if tag not in tags]
object.__setattr__(self, 'tags', new_tags)
|
Remove tags from a server. Accepts tags as strings or Tag objects.
|
entailment
|
def configure_firewall(self, FirewallRules):
"""
Helper function for automatically adding several FirewallRules in series.
"""
firewall_rule_bodies = [
FirewallRule.to_dict()
for FirewallRule in FirewallRules
]
return self.cloud_manager.configure_firewall(self, firewall_rule_bodies)
|
Helper function for automatically adding several FirewallRules in series.
|
entailment
|
def prepare_post_body(self):
"""
Prepare a JSON serializable dict from a Server instance with nested
Storage instances.
"""
body = dict()
# mandatory
body['server'] = {
'hostname': self.hostname,
'zone': self.zone,
'title': self.title,
'storage_devices': {}
}
# optional fields
for optional_field in self.optional_fields:
if hasattr(self, optional_field):
body['server'][optional_field] = getattr(self, optional_field)
# set password_delivery default as 'none' to prevent API from sending
# emails (with credentials) about each created server
if not hasattr(self, 'password_delivery'):
body['server']['password_delivery'] = 'none'
# collect storage devices and create a unique title (see: Storage.title in API doc)
# for each of them
body['server']['storage_devices'] = {
'storage_device': []
}
storage_title_id = 0 # running number for unique storage titles
for storage in self.storage_devices:
if not hasattr(storage, 'os') or storage.os is None:
storage_title_id += 1
storage_body = storage.to_dict()
# setup default titles for storages unless the user has specified
# them at storage.title
if not hasattr(storage, 'title') or not storage.title:
if hasattr(storage, 'os') and storage.os:
storage_body['title'] = self.hostname + ' OS disk'
else:
storage_body['title'] = self.hostname + ' storage disk ' + str(storage_title_id)
# figure out the storage `action` parameter
# public template
if hasattr(storage, 'os') and storage.os:
storage_body['action'] = 'clone'
storage_body['storage'] = OperatingSystems.get_OS_UUID(storage.os)
# private template
elif hasattr(storage, 'uuid'):
storage_body['action'] = 'clone'
storage_body['storage'] = storage.uuid
# create a new storage
else:
storage_body['action'] = 'create'
body['server']['storage_devices']['storage_device'].append(storage_body)
if hasattr(self, 'ip_addresses') and self.ip_addresses:
body['server']['ip_addresses'] = {
'ip_address': [
ip.to_dict() for ip in self.ip_addresses
]
}
return body
|
Prepare a JSON serializable dict from a Server instance with nested
Storage instances.
|
entailment
|
def to_dict(self):
"""
Prepare a JSON serializable dict for read-only purposes.
Includes storages and IP-addresses.
Use prepare_post_body for POST and .save() for PUT.
"""
fields = dict(vars(self).items())
if self.populated:
fields['ip_addresses'] = []
fields['storage_devices'] = []
for ip in self.ip_addresses:
fields['ip_addresses'].append({
'address': ip.address,
'access': ip.access,
'family': ip.family
})
for storage in self.storage_devices:
fields['storage_devices'].append({
'address': storage.address,
'storage': storage.uuid,
'storage_size': storage.size,
'storage_title': storage.title,
'type': storage.type,
})
del fields['populated']
del fields['cloud_manager']
return fields
|
Prepare a JSON serializable dict for read-only purposes.
Includes storages and IP-addresses.
Use prepare_post_body for POST and .save() for PUT.
|
entailment
|
def get_ip(self, access='public', addr_family=None, strict=None):
"""
Return the server's IP address.
Params:
- addr_family: IPv4, IPv6 or None. None prefers IPv4 but will
return IPv6 if IPv4 addr was not available.
- access: 'public' or 'private'
"""
if addr_family not in ['IPv4', 'IPv6', None]:
raise Exception("`addr_family` must be 'IPv4', 'IPv6' or None")
if access not in ['private', 'public']:
raise Exception("`access` must be 'public' or 'private'")
if not hasattr(self, 'ip_addresses'):
self.populate()
# server can have several public or private IPs
ip_addrs = [
ip_addr for ip_addr in self.ip_addresses
if ip_addr.access == access
]
# prefer addr_family (or IPv4 if none given)
preferred_family = addr_family if addr_family else 'IPv4'
for ip_addr in ip_addrs:
if ip_addr.family == preferred_family:
return ip_addr.address
# any IP (of the right access) will do if available and addr_family is None
return ip_addrs[0].address if ip_addrs and not addr_family else None
|
Return the server's IP address.
Params:
- addr_family: IPv4, IPv6 or None. None prefers IPv4 but will
return IPv6 if IPv4 addr was not available.
- access: 'public' or 'private'
|
entailment
|
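A usage sketch for the IP getters above; `server` stands in for a populated Server instance.

```python
# 'server' is assumed to be a populated Server instance.
ipv4 = server.get_public_ip()                                # prefers IPv4
ipv6 = server.get_ip(access='public', addr_family='IPv6')   # force IPv6
private = server.get_private_ip()
```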
def get_public_ip(self, addr_family=None, *args, **kwargs):
"""Alias for get_ip('public')"""
return self.get_ip('public', addr_family, *args, **kwargs)
|
Alias for get_ip('public')
|
entailment
|
def get_private_ip(self, addr_family=None, *args, **kwargs):
"""Alias for get_ip('private')"""
return self.get_ip('private', addr_family, *args, **kwargs)
|
Alias for get_ip('private')
|
entailment
|
def _wait_for_state_change(self, target_states, update_interval=10):
"""
Blocking wait until target_state reached. update_interval is in seconds.
Warning: state change must begin before calling this method.
"""
while self.state not in target_states:
if self.state == 'error':
raise Exception('server is in error state')
# update server state every 10s
sleep(update_interval)
self.populate()
|
Blocking wait until target_state reached. update_interval is in seconds.
Warning: state change must begin before calling this method.
|
entailment
|
def ensure_started(self):
"""
Start a server and wait (blocking) until it is fully started.
"""
# server is either starting or stopping (or error)
if self.state in ['maintenance', 'error']:
self._wait_for_state_change(['stopped', 'started'])
if self.state == 'stopped':
self.start()
self._wait_for_state_change(['started'])
if self.state == 'started':
return True
else:
# something went wrong, fail explicitly
raise Exception('unknown server state: ' + self.state)
|
Start a server and wait (blocking) until it is fully started.
|
entailment
|
def stop_and_destroy(self, sync=True):
"""
Destroy a server and its storages. Stops the server before destroying.
Syncs the server state from the API; use sync=False to disable.
"""
def _self_destruct():
"""destroy the server and all storages attached to it."""
# try_it_n_times util is used as a convenience because
# Servers and Storages can fluctuate between "maintenance" and their
# original state due to several different reasons especially when
# destroying infrastructure.
# first destroy server
try_it_n_times(operation=self.destroy,
expected_error_codes=['SERVER_STATE_ILLEGAL'],
custom_error='destroying server failed')
# storages may be deleted instantly after server DELETE
for storage in self.storage_devices:
try_it_n_times(operation=storage.destroy,
expected_error_codes=['STORAGE_STATE_ILLEGAL'],
custom_error='destroying storage failed')
if sync:
self.populate()
# server is either starting or stopping (or error)
if self.state in ['maintenance', 'error']:
self._wait_for_state_change(['stopped', 'started'])
if self.state == 'started':
try_it_n_times(operation=self.stop,
expected_error_codes=['SERVER_STATE_ILLEGAL'],
custom_error='stopping server failed')
self._wait_for_state_change(['stopped'])
if self.state == 'stopped':
_self_destruct()
else:
raise Exception('unknown server state: ' + self.state)
|
Destroy a server and its storages. Stops the server before destroying.
Syncs the server state from the API; use sync=False to disable.
|
entailment
|
def revert(self):
"""Revert the state to the version stored on disc."""
if self.filepath:
if path.isfile(self.filepath):
serialised_file = open(self.filepath, "r")
try:
self.state = json.load(serialised_file)
except ValueError:
print("No JSON information could be read from the persistence file - could be empty: %s" % self.filepath)
self.state = {}
finally:
serialised_file.close()
else:
print("The persistence file has not yet been created or does not exist, so the state cannot be read from it yet.")
else:
print("Filepath to the persistence file is not set. State cannot be read.")
return False
|
Revert the state to the version stored on disc.
|
entailment
|
def sync(self):
"""Synchronise and update the stored state to the in-memory state."""
if self.filepath:
serialised_file = open(self.filepath, "w")
json.dump(self.state, serialised_file)
serialised_file.close()
else:
print("Filepath to the persistence file is not set. State cannot be synced to disc.")
|
Synchronise and update the stored state to the in-memory state.
|
entailment
|
def _reset(self, **kwargs):
"""
Reset after repopulating from API (or when initializing).
"""
# set object attributes from params
for key in kwargs:
setattr(self, key, kwargs[key])
# set defaults (if need be) where the default is not None
for attr in self.ATTRIBUTES:
if not hasattr(self, attr) and self.ATTRIBUTES[attr] is not None:
setattr(self, attr, self.ATTRIBUTES[attr])
|
Reset after repopulating from API (or when initializing).
|
entailment
|
def to_dict(self):
"""
Return a dict that can be serialised to JSON and sent to UpCloud's API.
"""
return dict(
(attr, getattr(self, attr))
for attr in self.ATTRIBUTES
if hasattr(self, attr)
)
|
Return a dict that can be serialised to JSON and sent to UpCloud's API.
|
entailment
|
def _require_bucket(self, bucket_name):
""" Also try to create the bucket. """
if not self.exists(bucket_name) and not self.claim_bucket(bucket_name):
raise OFSException("Invalid bucket: %s" % bucket_name)
return self._get_bucket(bucket_name)
|
Also try to create the bucket.
|
entailment
|
def del_stream(self, bucket, label):
""" Will fail if the bucket or label don't exist """
bucket = self._require_bucket(bucket)
key = self._require_key(bucket, label)
key.delete()
|
Will fail if the bucket or the label does not exist.
|
entailment
|
def authenticate_request(self, method, bucket='', key='', headers=None):
'''Authenticate a HTTP request by filling in Authorization field header.
:param method: HTTP method (e.g. GET, PUT, POST)
:param bucket: name of the bucket.
:param key: name of key within bucket.
:param headers: dictionary of additional HTTP headers.
:return: boto.connection.HTTPRequest object with Authorization header
filled (NB: will also have a Date field if none before and a User-Agent
field will be set to Boto).
'''
# following is extracted from S3Connection.make_request and the method
# it calls: AWSAuthConnection.make_request
path = self.conn.calling_format.build_path_base(bucket, key)
auth_path = self.conn.calling_format.build_auth_path(bucket, key)
http_request = boto.connection.AWSAuthConnection.build_base_http_request(
self.conn,
method,
path,
auth_path,
{},
headers
)
http_request.authorize(connection=self.conn)
return http_request
|
Authenticate a HTTP request by filling in Authorization field header.
:param method: HTTP method (e.g. GET, PUT, POST)
:param bucket: name of the bucket.
:param key: name of key within bucket.
:param headers: dictionary of additional HTTP headers.
:return: boto.connection.HTTPRequest object with Authorization header
filled (NB: will also have a Date field if none before and a User-Agent
field will be set to Boto).
|
entailment
|
def get_resources_to_check(client_site_url, apikey):
"""Return a list of resource IDs to check for broken links.
Calls the client site's API to get a list of resource IDs.
:raises CouldNotGetResourceIDsError: if getting the resource IDs fails
for any reason
"""
url = client_site_url + u"deadoralive/get_resources_to_check"
response = requests.get(url, headers=dict(Authorization=apikey))
if not response.ok:
raise CouldNotGetResourceIDsError(
u"Couldn't get resource IDs to check: {code} {reason}".format(
code=response.status_code, reason=response.reason))
return response.json()
|
Return a list of resource IDs to check for broken links.
Calls the client site's API to get a list of resource IDs.
:raises CouldNotGetResourceIDsError: if getting the resource IDs fails
for any reason
|
entailment
|
def get_url_for_id(client_site_url, apikey, resource_id):
"""Return the URL for the given resource ID.
Contacts the client site's API to get the URL for the ID and returns it.
:raises CouldNotGetURLError: if getting the URL fails for any reason
"""
# TODO: Handle invalid responses from the client site.
url = client_site_url + u"deadoralive/get_url_for_resource_id"
params = {"resource_id": resource_id}
response = requests.get(url, headers=dict(Authorization=apikey),
params=params)
if not response.ok:
raise CouldNotGetURLError(
u"Couldn't get URL for resource {id}: {code} {reason}".format(
id=resource_id, code=response.status_code,
reason=response.reason))
return response.json()
|
Return the URL for the given resource ID.
Contacts the client site's API to get the URL for the ID and returns it.
:raises CouldNotGetURLError: if getting the URL fails for any reason
|
entailment
|
def check_url(url):
"""Check whether the given URL is dead or alive.
Returns a dict with four keys:
"url": The URL that was checked (string)
"alive": Whether the URL was working, True or False
"status": The HTTP status code of the response from the URL,
e.g. 200, 401, 500 (int)
"reason": The reason for the success or failure of the check,
e.g. "OK", "Unauthorized", "Internal Server Error" (string)
The "status" may be None if we did not get a valid HTTP response,
e.g. in the event of a timeout, DNS failure or invalid HTTP response.
The "reason" will always be a string, but may be a requests library
exception string rather than an HTTP reason string if we did not get a valid
HTTP response.
"""
result = {"url": url}
try:
response = requests.get(url)
result["status"] = response.status_code
result["reason"] = response.reason
response.raise_for_status() # Raise if status_code is not OK.
result["alive"] = True
except AttributeError as err:
if err.message == "'NoneType' object has no attribute 'encode'":
# requests seems to throw these for some invalid URLs.
result["alive"] = False
result["reason"] = "Invalid URL"
result["status"] = None
else:
raise
except requests.exceptions.RequestException as err:
result["alive"] = False
if "reason" not in result:
result["reason"] = str(err)
if "status" not in result:
# This can happen if the response is invalid HTTP, if we get a DNS
# failure, or a timeout, etc.
result["status"] = None
# We should always have these four fields in the result.
assert "url" in result
assert result.get("alive") in (True, False)
assert "status" in result
assert "reason" in result
return result
|
Check whether the given URL is dead or alive.
Returns a dict with four keys:
"url": The URL that was checked (string)
"alive": Whether the URL was working, True or False
"status": The HTTP status code of the response from the URL,
e.g. 200, 401, 500 (int)
"reason": The reason for the success or failure of the check,
e.g. "OK", "Unauthorized", "Internal Server Error" (string)
The "status" may be None if we did not get a valid HTTP response,
e.g. in the event of a timeout, DNS failure or invalid HTTP response.
The "reason" will always be a string, but may be a requests library
exception string rather than an HTTP reason string if we did not get a valid
HTTP response.
|
entailment
|
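A usage sketch for `check_url`, assuming `requests` is importable; the URL below is a placeholder.

```python
result = check_url('https://example.com/dataset.csv')  # placeholder URL
if result['alive']:
    print('OK:', result['status'], result['reason'])
else:
    print('Broken:', result['status'], result['reason'])
```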
def upsert_result(client_site_url, apikey, resource_id, result):
"""Post the given link check result to the client site."""
# TODO: Handle exceptions and unexpected results.
url = client_site_url + u"deadoralive/upsert"
params = result.copy()
params["resource_id"] = resource_id
requests.post(url, headers=dict(Authorization=apikey), params=params)
|
Post the given link check result to the client site.
|
entailment
|
def get_check_and_report(client_site_url, apikey, get_resource_ids_to_check,
get_url_for_id, check_url, upsert_result):
"""Get links from the client site, check them, and post the results back.
Get resource IDs from the client site, get the URL for each resource ID from
the client site, check each URL, and post the results back to the client
site.
This function can be called repeatedly to keep on getting more links from
the client site and checking them.
The functions that this function calls to carry out the various tasks are
taken as parameters to this function for testing purposes - it makes it
easy for tests to pass in mock functions. It also decouples the code nicely.
:param client_site_url: the base URL of the client site
:type client_site_url: string
:param apikey: the API key to use when making requests to the client site
:type apikey: string or None
:param get_resource_ids_to_check: The function to call to get the list of
resource IDs to be checked from the client site. See
get_resource_ids_to_check() above for the interface that this function
should implement.
:type get_resource_ids_to_check: callable
:param get_url_for_id: The function to call to get the URL for a given
resource ID from the client site. See get_url_for_id() above for the
interface that this function should implement.
:type get_url_for_id: callable
:param check_url: The function to call to check whether a URL is dead or
alive. See check_url() above for the interface that this function
should implement.
:type check_url: callable
:param upsert_result: The function to call to post a link check result to
the client site. See upsert_result() above for the interface that this
function should implement.
:type upsert_result: callable
"""
logger = _get_logger()
resource_ids = get_resource_ids_to_check(client_site_url, apikey)
for resource_id in resource_ids:
try:
url = get_url_for_id(client_site_url, apikey, resource_id)
except CouldNotGetURLError:
logger.info(u"This link checker was not authorized to access "
"resource {0}, skipping.".format(resource_id))
continue
result = check_url(url)
status = result["status"]
reason = result["reason"]
if result["alive"]:
logger.info(u"Checking URL {0} of resource {1} succeeded with "
"status {2}:".format(url, resource_id, status))
else:
logger.info(u"Checking URL {0} of resource {1} failed with error "
"{2}:".format(url, resource_id, reason))
upsert_result(client_site_url, apikey, resource_id=resource_id,
result=result)
|
Get links from the client site, check them, and post the results back.
Get resource IDs from the client site, get the URL for each resource ID from
the client site, check each URL, and post the results back to the client
site.
This function can be called repeatedly to keep on getting more links from
the client site and checking them.
The functions that this function calls to carry out the various tasks are
taken as parameters to this function for testing purposes - it makes it
easy for tests to pass in mock functions. It also decouples the code nicely.
:param client_site_url: the base URL of the client site
:type client_site_url: string
:param apikey: the API key to use when making requests to the client site
:type apikey: string or None
:param get_resource_ids_to_check: The function to call to get the list of
resource IDs to be checked from the client site. See
get_resource_ids_to_check() above for the interface that this function
should implement.
:type get_resource_ids_to_check: callable
:param get_url_for_id: The function to call to get the URL for a given
resource ID from the client site. See get_url_for_id() above for the
interface that this function should implement.
:type get_url_for_id: callable
:param check_url: The function to call to check whether a URL is dead or
alive. See check_url() above for the interface that this function
should implement.
:type check_url: callable
:param upsert_result: The function to call to post a link check result to
the client site. See upsert_result() above for the interface that this
function should implement.
:type upsert_result: callable
|
entailment
|
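A sketch of wiring the pieces above together; the client site URL and API key are placeholders.

```python
# Fetch a batch of resource IDs from the client site, check each URL
# and post the results back via the functions defined above.
get_check_and_report(
    client_site_url='https://demo-client-site.example.org/',  # placeholder
    apikey='my-api-key',                                       # placeholder
    get_resource_ids_to_check=get_resources_to_check,
    get_url_for_id=get_url_for_id,
    check_url=check_url,
    upsert_result=upsert_result,
)
```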
def peek(self, n=1):
"""Returns buffered bytes without advancing the position."""
if n > len(self._readbuffer) - self._offset:
chunk = self.read(n)
self._offset -= len(chunk)
# Return up to 512 bytes to reduce allocation overhead for tight loops.
return self._readbuffer[self._offset: self._offset + 512]
|
Returns buffered bytes without advancing the position.
|
entailment
|
def read(self, n=-1):
"""Read and return up to n bytes.
If the argument is omitted, None, or negative, data is read and returned until EOF is reached.
"""
buf = b''
while n < 0 or n is None or n > len(buf):
data = self.read1(n)
if len(data) == 0:
return buf
buf += data
return buf
|
Read and return up to n bytes.
If the argument is omitted, None, or negative, data is read and returned until EOF is reached.
|
entailment
|
def _RealGetContents(self):
"""Read in the table of contents for the ZIP file."""
fp = self.fp
endrec = _EndRecData(fp)
if not endrec:
raise BadZipfile("File is not a zip file")
if self.debug > 1:
print(endrec)
size_cd = endrec[_ECD_SIZE] # bytes in central directory
offset_cd = endrec[_ECD_OFFSET] # offset of central directory
self.comment = endrec[_ECD_COMMENT] # archive comment
# "concat" is zero, unless zip was concatenated to another file
concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
if endrec[_ECD_SIGNATURE] == stringEndArchive64:
# If Zip64 extension structures are present, account for them
concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
if self.debug > 2:
inferred = concat + offset_cd
print("given, inferred, offset", offset_cd, inferred, concat)
# self.start_dir: Position of start of central directory
self.start_dir = offset_cd + concat
fp.seek(self.start_dir, 0)
data = fp.read(size_cd)
fp = cStringIO.StringIO(data)
total = 0
while total < size_cd:
centdir = fp.read(sizeCentralDir)
if centdir[0:4] != stringCentralDir:
raise BadZipfile("Bad magic number for central directory")
centdir = struct.unpack(structCentralDir, centdir)
if self.debug > 2:
print(centdir)
filename = fp.read(centdir[_CD_FILENAME_LENGTH])
# Create ZipInfo instance to store file information
x = ZipInfo(filename)
x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
(x.create_version, x.create_system, x.extract_version, x.reserved,
x.flag_bits, x.compress_type, t, d,
x.CRC, x.compress_size, x.file_size) = centdir[1:12]
x.volume, x.internal_attr, x.external_attr = centdir[15:18]
# Convert date/time code to (year, month, day, hour, min, sec)
x._raw_time = t
x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F,
t>>11, (t>>5)&0x3F, (t&0x1F) * 2 )
x._decodeExtra()
x.header_offset = x.header_offset + concat
x.filename = x._decodeFilename()
self.filelist.append(x)
self.NameToInfo[x.filename] = x
# update total bytes read from central directory
total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH]
+ centdir[_CD_EXTRA_FIELD_LENGTH]
+ centdir[_CD_COMMENT_LENGTH])
if self.debug > 2:
print("total", total)
|
Read in the table of contents for the ZIP file.
|
entailment
|
def open(self, name, mode="r", pwd=None):
"""Return file-like object for 'name'."""
if mode not in ("r", "U", "rU"):
raise RuntimeError('open() requires mode "r", "U", or "rU"')
if not self.fp:
raise RuntimeError(
"Attempt to read ZIP archive that was already closed"
)
# Only open a new file for instances where we were not
# given a file object in the constructor
if self._filePassed:
zef_file = self.fp
else:
zef_file = open(self.filename, 'rb')
# Make sure we have an info object
if isinstance(name, ZipInfo):
# 'name' is already an info object
zinfo = name
else:
# Get info object for name
zinfo = self.getinfo(name)
zef_file.seek(zinfo.header_offset, 0)
# Skip the file header:
fheader = zef_file.read(sizeFileHeader)
if fheader[0:4] != stringFileHeader:
raise BadZipfile("Bad magic number for file header")
fheader = struct.unpack(structFileHeader, fheader)
fname = zef_file.read(fheader[_FH_FILENAME_LENGTH])
if fheader[_FH_EXTRA_FIELD_LENGTH]:
zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])
if fname != zinfo.orig_filename.encode('utf-8'):
raise BadZipfile(
'File name in directory "%s" and header "%s" differ.' % (
zinfo.orig_filename, fname)
)
# check for encrypted flag & handle password
is_encrypted = zinfo.flag_bits & 0x1
zd = None
if is_encrypted:
if not pwd:
pwd = self.pwd
if not pwd:
raise RuntimeError("File %s is encrypted, " \
"password required for extraction" % name)
zd = _ZipDecrypter(pwd)
# The first 12 bytes in the cypher stream is an encryption header
# used to strengthen the algorithm. The first 11 bytes are
# completely random, while the 12th contains the MSB of the CRC,
# or the MSB of the file time depending on the header type
# and is used to check the correctness of the password.
bytes = zef_file.read(12)
h = map(zd, bytes[0:12])
if zinfo.flag_bits & 0x8:
# compare against the file type from extended local headers
check_byte = (zinfo._raw_time >> 8) & 0xff
else:
# compare against the CRC otherwise
check_byte = (zinfo.CRC >> 24) & 0xff
if ord(h[11]) != check_byte:
raise RuntimeError("Bad password for file", name)
return ZipExtFile(zef_file, mode, zinfo, zd)
|
Return file-like object for 'name'.
|
entailment
|
def remove(self, member):
"""Remove a member from the archive."""
# Make sure we have an info object
if isinstance(member, ZipInfo):
# 'member' is already an info object
zinfo = member
else:
# Get info object for name
zinfo = self.getinfo(member)
# compute the location of the file data in the local file header,
# by adding the lengths of the records before it
zlen = len(zinfo.FileHeader()) + zinfo.compress_size
fileidx = self.filelist.index(zinfo)
fileofs = sum(
[len(self.filelist[f].FileHeader()) + self.filelist[f].compress_size
for f in xrange(0, fileidx)]
)
self.fp.seek(fileofs + zlen)
after = self.fp.read()
self.fp.seek(fileofs)
self.fp.write(after)
self.fp.seek(-zlen, 2)
self.fp.truncate()
self._didModify = True
self.filelist.remove(zinfo)
del self.NameToInfo[member]
|
Remove a member from the archive.
|
entailment
|
def _get_codename(self, pathname, basename):
"""Return (filename, archivename) for the path.
Given a module name path, return the correct file path and
archive name, compiling if necessary. For example, given
/python/lib/string, return (/python/lib/string.pyc, string).
"""
file_py = pathname + ".py"
file_pyc = pathname + ".pyc"
file_pyo = pathname + ".pyo"
if os.path.isfile(file_pyo) and \
os.stat(file_pyo).st_mtime >= os.stat(file_py).st_mtime:
fname = file_pyo # Use .pyo file
elif not os.path.isfile(file_pyc) or \
os.stat(file_pyc).st_mtime < os.stat(file_py).st_mtime:
import py_compile
if self.debug:
print("Compiling", file_py)
try:
py_compile.compile(file_py, file_pyc, None, True)
except py_compile.PyCompileError as err:
print(err.msg)
fname = file_pyc
else:
fname = file_pyc
archivename = os.path.split(fname)[1]
if basename:
archivename = "%s/%s" % (basename, archivename)
return (fname, archivename)
|
Return (filename, archivename) for the path.
Given a module name path, return the correct file path and
archive name, compiling if necessary. For example, given
/python/lib/string, return (/python/lib/string.pyc, string).
|
entailment
|
def import_class(class_path):
'''
Imports the class for the given class name.
'''
module_name, class_name = class_path.rsplit(".", 1)
module = import_module(module_name)
claz = getattr(module, class_name)
return claz
|
Imports the class for the given class name.
|
entailment
|
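A usage example for `import_class` using a standard-library class path.

```python
# Resolve a dotted path to the class object and instantiate it.
OrderedDict = import_class('collections.OrderedDict')
d = OrderedDict(a=1, b=2)
```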
def _executor(self):
'''
Creating an ExecutorPool is a costly operation. Executor needs to be instantiated only once.
'''
if self.EXECUTE_PARALLEL is False:
executor_path = "batch_requests.concurrent.executor.SequentialExecutor"
executor_class = import_class(executor_path)
return executor_class()
else:
executor_path = self.CONCURRENT_EXECUTOR
executor_class = import_class(executor_path)
return executor_class(self.NUM_WORKERS)
|
Creating an ExecutorPool is a costly operation. Executor needs to be instantiated only once.
|
entailment
|
def make_label(self, path):
"""
this borrows too much from the internals of ofs
maybe expose different parts of the api?
"""
from datetime import datetime
from StringIO import StringIO
path = path.lstrip("/")
bucket, label = path.split("/", 1)
bucket = self.ofs._require_bucket(bucket)
key = self.ofs._get_key(bucket, label)
if key is None:
key = bucket.new_key(label)
self.ofs._update_key_metadata(key, { '_creation_time': str(datetime.utcnow()) })
key.set_contents_from_file(StringIO(''))
key.close()
|
this borrows too much from the internals of ofs
maybe expose different parts of the api?
|
entailment
|
def get_proxy_config(self, headers, path):
"""
stub. this really needs to be a call to the remote
restful interface to get the appropriate host and
headers to use for this upload
"""
self.ofs.conn.add_aws_auth_header(headers, 'PUT', path)
from pprint import pprint
pprint(headers)
host = self.ofs.conn.server_name()
return host, headers
|
stub. this really needs to be a call to the remote
restful interface to get the appropriate host and
headers to use for this upload
|
entailment
|
def proxy_upload(self, path, filename, content_type=None, content_encoding=None,
cb=None, num_cb=None):
"""
This is the main function that uploads. We assume the bucket
and key (== path) exist. What we do here is simple. Calculate
the headers we will need, (e.g. md5, content-type, etc). Then
we ask the self.get_proxy_config method to fill in the authentication
information and tell us which remote host we should talk to
for the upload. From there, the rest is ripped from
boto.key.Key.send_file
"""
from boto.connection import AWSAuthConnection
import mimetypes
from hashlib import md5
import base64
BufferSize = 65536 ## set to something very small to make sure
## chunking is working properly
fp = open(filename)
headers = { 'Content-Type': content_type }
if content_type is None:
content_type = mimetypes.guess_type(filename)[0] or "text/plain"
headers['Content-Type'] = content_type
if content_encoding is not None:
headers['Content-Encoding'] = content_encoding
m = md5()
fp.seek(0)
s = fp.read(BufferSize)
while s:
m.update(s)
s = fp.read(BufferSize)
self.size = fp.tell()
fp.seek(0)
self.md5 = m.hexdigest()
headers['Content-MD5'] = base64.encodestring(m.digest()).rstrip('\n')
headers['Content-Length'] = str(self.size)
headers['Expect'] = '100-Continue'
host, headers = self.get_proxy_config(headers, path)
### how to do this same thing with curl instead...
print("curl -i --trace-ascii foo.log -T %s -H %s https://%s%s" % (
filename,
" -H ".join("'%s: %s'" % (k,v) for k,v in headers.items()),
host, path
))
def sender(http_conn, method, path, data, headers):
http_conn.putrequest(method, path)
for key in headers:
http_conn.putheader(key, headers[key])
http_conn.endheaders()
fp.seek(0)
http_conn.set_debuglevel(0) ### XXX set to e.g. 4 to see what going on
if cb:
if num_cb > 2:
cb_count = self.size / BufferSize / (num_cb-2)
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = total_bytes = 0
cb(total_bytes, self.size)
l = fp.read(BufferSize)
while len(l) > 0:
http_conn.send(l)
if cb:
total_bytes += len(l)
i += 1
if i == cb_count or cb_count == -1:
cb(total_bytes, self.size)
i = 0
l = fp.read(BufferSize)
if cb:
cb(total_bytes, self.size)
response = http_conn.getresponse()
body = response.read()
fp.seek(0)
if response.status == 500 or response.status == 503 or \
response.getheader('location'):
# we'll try again
return response
elif response.status >= 200 and response.status <= 299:
self.etag = response.getheader('etag')
if self.etag != '"%s"' % self.md5:
raise Exception('ETag from S3 did not match computed MD5')
return response
else:
#raise provider.storage_response_error(
# response.status, response.reason, body)
raise Exception(response.status, response.reason, body)
awsc = AWSAuthConnection(host,
aws_access_key_id="key_id",
aws_secret_access_key="secret")
awsc._mexe('PUT', path, None, headers, sender=sender)
|
This is the main function that uploads. We assume the bucket
and key (== path) exist. What we do here is simple. Calculate
the headers we will need, (e.g. md5, content-type, etc). Then
we ask the self.get_proxy_config method to fill in the authentication
information and tell us which remote host we should talk to
for the upload. From there, the rest is ripped from
boto.key.Key.send_file
|
entailment
|
def fetch_all_mood_stations(self, terr=KKBOXTerritory.TAIWAN):
'''
Fetches all mood stations.
:param terr: the current territory.
:return: API response.
:rtype: dict
See `https://docs-en.kkbox.codes/v1.1/reference#moodstations`.
'''
url = 'https://api.kkbox.com/v1.1/mood-stations'
url += '?' + url_parse.urlencode({'territory': terr})
return self.http._post_data(url, None, self.http._headers_with_access_token())
|
Fetches all mood stations.
:param terr: the current territory.
:return: API response.
:rtype: dict
See `https://docs-en.kkbox.codes/v1.1/reference#moodstations`.
|
entailment
|
def fetch_mood_station(self, station_id, terr=KKBOXTerritory.TAIWAN):
'''
Fetches a mood station by given ID.
:param station_id: the station ID
:param terr: the current territory.
:return: API response.
:rtype: dict
See `https://docs-en.kkbox.codes/v1.1/reference#moodstations-station_id`.
'''
url = 'https://api.kkbox.com/v1.1/mood-stations/%s' % station_id
url += '?' + url_parse.urlencode({'territory': terr})
return self.http._post_data(url, None, self.http._headers_with_access_token())
|
Fetches a mood station by given ID.
:param station_id: the station ID
:param terr: the current territory.
:return: API response.
:rtype: dict
See `https://docs-en.kkbox.codes/v1.1/reference#moodstations-station_id`.
|
entailment
|
def fetch_next_page(self, data):
'''
Fetches next page based on previously fetched data.
Will get the next page url from data['paging']['next'].
:param data: previously fetched API response.
:type data: dict
:return: API response.
:rtype: dict
'''
next_url = data['paging']['next']
if next_url is not None:
next_data = self.http._post_data(next_url, None, self.http._headers_with_access_token())
return next_data
else:
return None
|
Fetches next page based on previously fetched data.
Will get the next page url from data['paging']['next'].
:param data: previously fetched API response.
:type data: dict
:return: API response.
:rtype: dict
|
entailment
|
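A pagination sketch built on `fetch_next_page`; `api` stands in for a fetcher instance exposing the methods above, and the territory code and `process_page` are placeholders.

```python
# Walk every page; fetch_next_page returns None once data['paging']['next'] is empty.
data = api.fetch_all_mood_stations(terr='TW')  # 'api' and 'TW' are placeholders
while data is not None:
    process_page(data)                         # placeholder for your own handling
    data = api.fetch_next_page(data)
```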
def fetch_data(self, url):
'''
Fetches data from a specific url.
:return: The response.
:rtype: dict
'''
return self.http._post_data(url, None, self.http._headers_with_access_token())
|
Fetches data from a specific url.
:return: The response.
:rtype: dict
|
entailment
|
def fetch_shared_playlist(self, playlist_id, terr=KKBOXTerritory.TAIWAN):
'''
Fetches a shared playlist by given ID.
:param playlist_id: the playlist ID.
:type playlist_id: str
:param terr: the current territory.
:return: API response.
:rtype: dict
See `https://docs-en.kkbox.codes/v1.1/reference#sharedplaylists-playlist_id`.
'''
url = 'https://api.kkbox.com/v1.1/shared-playlists/%s' % playlist_id
url += '?' + url_parse.urlencode({'territory': terr})
return self.http._post_data(url, None, self.http._headers_with_access_token())
|
Fetches a shared playlist by given ID.
:param playlist_id: the playlist ID.
:type playlist_id: str
:param terr: the current territory.
:return: API response.
:rtype: dict
See `https://docs-en.kkbox.codes/v1.1/reference#sharedplaylists-playlist_id`.
|
entailment
|
def get_firewall_rule(self, server_uuid, firewall_rule_position, server_instance=None):
"""
Return a FirewallRule object based on server uuid and rule position.
"""
url = '/server/{0}/firewall_rule/{1}'.format(server_uuid, firewall_rule_position)
res = self.get_request(url)
return FirewallRule(**res['firewall_rule'])
|
Return a FirewallRule object based on server uuid and rule position.
|
entailment
|
def get_firewall_rules(self, server):
"""
Return all FirewallRule objects based on a server instance or uuid.
"""
server_uuid, server_instance = uuid_and_instance(server)
url = '/server/{0}/firewall_rule'.format(server_uuid)
res = self.get_request(url)
return [
FirewallRule(server=server_instance, **firewall_rule)
for firewall_rule in res['firewall_rules']['firewall_rule']
]
|
Return all FirewallRule objects based on a server instance or uuid.
|
entailment
|
def create_firewall_rule(self, server, firewall_rule_body):
"""
Create a new firewall rule for a given server uuid.
The rule can be given as a dict or built with FirewallRule.prepare_post_body().
Returns a FirewallRule object.
"""
server_uuid, server_instance = uuid_and_instance(server)
url = '/server/{0}/firewall_rule'.format(server_uuid)
body = {'firewall_rule': firewall_rule_body}
res = self.post_request(url, body)
return FirewallRule(server=server_instance, **res['firewall_rule'])
|
Create a new firewall rule for a given server uuid.
The rule can be given as a dict or built with FirewallRule.prepare_post_body().
Returns a FirewallRule object.
|
entailment
|
def delete_firewall_rule(self, server_uuid, firewall_rule_position):
"""
Delete a firewall rule based on a server uuid and rule position.
"""
url = '/server/{0}/firewall_rule/{1}'.format(server_uuid, firewall_rule_position)
return self.request('DELETE', url)
|
Delete a firewall rule based on a server uuid and rule position.
|
entailment
|
def configure_firewall(self, server, firewall_rule_bodies):
"""
Helper for calling create_firewall_rule in series for a list of firewall_rule_bodies.
"""
server_uuid, server_instance = uuid_and_instance(server)
return [
self.create_firewall_rule(server_uuid, rule)
for rule in firewall_rule_bodies
]
|
Helper for calling create_firewall_rule in series for a list of firewall_rule_bodies.
|
entailment
|
def post(self, data):
"""
POSTs a raw SMTP message to the Sinkhole API
:param data: raw content to be submitted [STRING]
:return: { list of predictions }
"""
uri = '{}/sinkhole'.format(self.client.remote)
self.logger.debug(uri)
if PYVERSION == 2:
try:
data = data.decode('utf-8')
except Exception:
data = data.decode('latin-1')
data = {
'message': data
}
body = self.client.post(uri, data)
return body
|
POSTs a raw SMTP message to the Sinkhole API
:param data: raw content to be submitted [STRING]
:return: { list of predictions }
|
entailment
|
def pre_process_method_headers(method, headers):
'''
Return the lower-cased method and the transformed headers:
header names are upper-cased, '-' is replaced with '_', and non-WSGI headers are prefixed with HTTP_.
'''
method = method.lower()
# Standard WSGI supported headers
_wsgi_headers = ["content_length", "content_type", "query_string",
"remote_addr", "remote_host", "remote_user",
"request_method", "server_name", "server_port"]
_transformed_headers = {}
# For every header, replace - to _, prepend http_ if necessary and convert
# to upper case.
for header, value in headers.items():
header = header.replace("-", "_")
header = "http_{header}".format(
header=header) if header.lower() not in _wsgi_headers else header
_transformed_headers.update({header.upper(): value})
return method, _transformed_headers
|
Return the lower-cased method and the transformed headers:
header names are upper-cased, '-' is replaced with '_', and non-WSGI headers are prefixed with HTTP_.
|
entailment
|
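A worked example of the transformation performed by `pre_process_method_headers`; the header values are invented for illustration.

```python
method, headers = pre_process_method_headers(
    'GET',
    {'Content-Type': 'application/json', 'X-Request-Id': 'abc-123'}
)
# method  == 'get'
# headers == {'CONTENT_TYPE': 'application/json', 'HTTP_X_REQUEST_ID': 'abc-123'}
```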
def headers_to_include_from_request(curr_request):
'''
Define headers that need to be included from the current request.
'''
return {
h: v for h, v in curr_request.META.items() if h in _settings.HEADERS_TO_INCLUDE}
|
Define headers that need to be included from the current request.
|
entailment
|
def get_wsgi_request_object(curr_request, method, url, headers, body):
'''
Based on the given request parameters, constructs and returns the WSGI request object.
'''
x_headers = headers_to_include_from_request(curr_request)
method, t_headers = pre_process_method_headers(method, headers)
# Add default content type.
if "CONTENT_TYPE" not in t_headers:
t_headers.update({"CONTENT_TYPE": _settings.DEFAULT_CONTENT_TYPE})
# Override existing batch requests headers with the new headers passed for this request.
x_headers.update(t_headers)
content_type = x_headers.get("CONTENT_TYPE", _settings.DEFAULT_CONTENT_TYPE)
# Get hold of request factory to construct the request.
_request_factory = BatchRequestFactory()
_request_provider = getattr(_request_factory, method)
secure = _settings.USE_HTTPS
request = _request_provider(url, data=body, secure=secure,
content_type=content_type, **x_headers)
return request
|
Based on the given request parameters, constructs and returns the WSGI request object.
|
entailment
|
def _base_environ(self, **request):
'''
Override the default values for the wsgi environment variables.
'''
# This is a minimal valid WSGI environ dictionary, plus:
# - HTTP_COOKIE: for cookie support,
# - REMOTE_ADDR: often useful, see #8551.
# See http://www.python.org/dev/peps/pep-3333/#environ-variables
environ = {
'HTTP_COOKIE': self.cookies.output(header='', sep='; '),
'PATH_INFO': str('/'),
'REMOTE_ADDR': str('127.0.0.1'),
'REQUEST_METHOD': str('GET'),
'SCRIPT_NAME': str(''),
'SERVER_NAME': str('localhost'),
'SERVER_PORT': str('8000'),
'SERVER_PROTOCOL': str('HTTP/1.1'),
'wsgi.version': (1, 0),
'wsgi.url_scheme': str('http'),
'wsgi.input': FakePayload(b''),
'wsgi.errors': self.errors,
'wsgi.multiprocess': True,
'wsgi.multithread': True,
'wsgi.run_once': False,
}
environ.update(self.defaults)
environ.update(request)
return environ
|
Override the default values for the wsgi environment variables.
|
entailment
|
def request(self, method, endpoint, body=None, timeout=-1):
"""
Perform a request with a given body to a given endpoint in UpCloud's API.
Handles errors with __error_middleware.
"""
if method not in set(['GET', 'POST', 'PUT', 'DELETE']):
raise Exception('Invalid/Forbidden HTTP method')
url = '/' + self.api_v + endpoint
headers = {
'Authorization': self.token,
'Content-Type': 'application/json'
}
if body:
json_body_or_None = json.dumps(body)
else:
json_body_or_None = None
call_timeout = timeout if timeout != -1 else self.timeout
APIcall = getattr(requests, method.lower())
res = APIcall('https://api.upcloud.com' + url,
data=json_body_or_None,
headers=headers,
timeout=call_timeout)
if res.text:
res_json = res.json()
else:
res_json = {}
return self.__error_middleware(res, res_json)
|
Perform a request with a given body to a given endpoint in UpCloud's API.
Handles errors with __error_middleware.
|
entailment
|
def post_request(self, endpoint, body=None, timeout=-1):
"""
Perform a POST request to a given endpoint in UpCloud's API.
"""
return self.request('POST', endpoint, body, timeout)
|
Perform a POST request to a given endpoint in UpCloud's API.
|
entailment
|
def __error_middleware(self, res, res_json):
"""
Middleware that raises an exception when the HTTP status code is an error code.
"""
if res.status_code in [400, 401, 402, 403, 404, 405, 406, 409]:
err_dict = res_json.get('error', {})
raise UpCloudAPIError(error_code=err_dict.get('error_code'),
error_message=err_dict.get('error_message'))
return res_json
|
Middleware that raises an exception when the HTTP status code is an error code.
|
entailment
|
def put_stream(self, bucket, label, stream_object, params={}):
''' Create a new file in swift object storage. '''
self.claim_bucket(bucket)
self.connection.put_object(bucket, label, stream_object,
headers=self._convert_to_meta(params))
|
Create a new file in swift object storage.
|
entailment
|
def h(gbm, array_or_frame, indices_or_columns = 'all'):
"""
PURPOSE
Compute Friedman and Popescu's H statistic, in order to look for an interaction in the passed gradient-boosting
model among the variables represented by the elements of the passed array or frame and specified by the passed
indices or columns.
See Jerome H. Friedman and Bogdan E. Popescu, 2008, "Predictive learning via rule ensembles", Ann. Appl. Stat.
2:916-954, http://projecteuclid.org/download/pdfview_1/euclid.aoas/1223908046, s. 8.1.
ARGUMENTS
gbm should be a scikit-learn gradient-boosting model (instance of sklearn.ensemble.GradientBoostingClassifier or
sklearn.ensemble.GradientBoostingRegressor) that has been fitted to array_or_frame (and a target, not used here).
array_or_frame should be a two-dimensional NumPy array or a pandas data frame (instance of numpy.ndarray or pandas
.DataFrame).
indices_or_columns is optional, with default value 'all'. It should be 'all' or a list of indices of columns of
array_or_frame if array_or_frame is a NumPy array or a list of columns of array_or_frame if array_or_frame is a
pandas data frame. If it is 'all', then all columns of array_or_frame are used.
RETURNS
The H statistic of the variables or NaN if the computation is spoiled by weak main effects and rounding errors.
H varies from 0 to 1. The larger H, the stronger the evidence for an interaction among the variables.
EXAMPLES
Friedman and Popescu's (2008) formulas (44) and (46) correspond to
h(F, x, [j, k])
and
h(F, x, [j, k, l])
respectively.
NOTES
1. Per Friedman and Popescu, only variables with strong main effects should be examined for interactions. Strengths
of main effects are available as gbm.feature_importances_ once gbm has been fitted.
2. Per Friedman and Popescu, collinearity among variables can lead to interactions in gbm that are not present in
the target function. To forestall such spurious interactions, check for strong correlations among variables before
fitting gbm.
"""
if indices_or_columns == 'all':
if gbm.max_depth < array_or_frame.shape[1]:
raise \
Exception(
"gbm.max_depth == {} < array_or_frame.shape[1] == {}, so indices_or_columns must not be 'all'."
.format(gbm.max_depth, array_or_frame.shape[1])
)
else:
if gbm.max_depth < len(indices_or_columns):
raise \
Exception(
"gbm.max_depth == {}, so indices_or_columns must contain at most {} {}."
.format(gbm.max_depth, gbm.max_depth, "element" if gbm.max_depth == 1 else "elements")
)
check_args_contd(array_or_frame, indices_or_columns)
arr, model_inds = get_arr_and_model_inds(array_or_frame, indices_or_columns)
width = arr.shape[1]
f_vals = {}
for n in range(width, 0, -1):
for inds in itertools.combinations(range(width), n):
f_vals[inds] = compute_f_vals(gbm, model_inds, arr, inds)
return compute_h_val(f_vals, arr, tuple(range(width)))
|
PURPOSE
Compute Friedman and Popescu's H statistic, in order to look for an interaction in the passed gradient-boosting
model among the variables represented by the elements of the passed array or frame and specified by the passed
indices or columns.
See Jerome H. Friedman and Bogdan E. Popescu, 2008, "Predictive learning via rule ensembles", Ann. Appl. Stat.
2:916-954, http://projecteuclid.org/download/pdfview_1/euclid.aoas/1223908046, s. 8.1.
ARGUMENTS
gbm should be a scikit-learn gradient-boosting model (instance of sklearn.ensemble.GradientBoostingClassifier or
sklearn.ensemble.GradientBoostingRegressor) that has been fitted to array_or_frame (and a target, not used here).
array_or_frame should be a two-dimensional NumPy array or a pandas data frame (instance of numpy.ndarray or pandas
.DataFrame).
indices_or_columns is optional, with default value 'all'. It should be 'all' or a list of indices of columns of
array_or_frame if array_or_frame is a NumPy array or a list of columns of array_or_frame if array_or_frame is a
pandas data frame. If it is 'all', then all columns of array_or_frame are used.
RETURNS
The H statistic of the variables or NaN if the computation is spoiled by weak main effects and rounding errors.
H varies from 0 to 1. The larger H, the stronger the evidence for an interaction among the variables.
EXAMPLES
Friedman and Popescu's (2008) formulas (44) and (46) correspond to
h(F, x, [j, k])
and
h(F, x, [j, k, l])
respectively.
NOTES
1. Per Friedman and Popescu, only variables with strong main effects should be examined for interactions. Strengths
of main effects are available as gbm.feature_importances_ once gbm has been fitted.
2. Per Friedman and Popescu, collinearity among variables can lead to interactions in gbm that are not present in
the target function. To forestall such spurious interactions, check for strong correlations among variables before
fitting gbm.
|
entailment
|
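A minimal sketch of calling h() on a fitted model (synthetic data; the deliberate x0*x1 interaction is illustrative):

import numpy as np
from sklearn.ensemble import GradientBoostingRegressor

rng = np.random.RandomState(0)
X = rng.uniform(size=(500, 3))
y = X[:, 0] * X[:, 1] + X[:, 2]          # interaction between columns 0 and 1

# max_depth must be at least the number of variables passed to h().
gbm = GradientBoostingRegressor(max_depth=2, random_state=0).fit(X, y)

# H near 1 suggests a strong interaction; near 0 suggests none.
print(h(gbm, X, [0, 1]))   # expected to exceed the value for the next pair
print(h(gbm, X, [0, 2]))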
def h_all_pairs(gbm, array_or_frame, indices_or_columns = 'all'):
"""
PURPOSE
Compute Friedman and Popescu's two-variable H statistic, in order to look for an interaction in the passed gradient-
boosting model between each pair of variables represented by the elements of the passed array or frame and specified
by the passed indices or columns.
See Jerome H. Friedman and Bogdan E. Popescu, 2008, "Predictive learning via rule ensembles", Ann. Appl. Stat.
2:916-954, http://projecteuclid.org/download/pdfview_1/euclid.aoas/1223908046, s. 8.1.
ARGUMENTS
gbm should be a scikit-learn gradient-boosting model (instance of sklearn.ensemble.GradientBoostingClassifier or
sklearn.ensemble.GradientBoostingRegressor) that has been fitted to array_or_frame (and a target, not used here).
array_or_frame should be a two-dimensional NumPy array or a pandas data frame (instance of numpy.ndarray or pandas
.DataFrame).
indices_or_columns is optional, with default value 'all'. It should be 'all' or a list of indices of columns of
array_or_frame if array_or_frame is a NumPy array or a list of columns of array_or_frame if array_or_frame is a
pandas data frame. If it is 'all', then all columns of array_or_frame are used.
RETURNS
A dict whose keys are pairs (2-tuples) of indices or columns and whose values are the H statistic of the pairs of
variables or NaN if a computation is spoiled by weak main effects and rounding errors.
H varies from 0 to 1. The larger H, the stronger the evidence for an interaction between a pair of variables.
EXAMPLE
Friedman and Popescu's (2008) formula (44) for every j and k corresponds to
h_all_pairs(F, x)
NOTES
1. Per Friedman and Popescu, only variables with strong main effects should be examined for interactions. Strengths
of main effects are available as gbm.feature_importances_ once gbm has been fitted.
2. Per Friedman and Popescu, collinearity among variables can lead to interactions in gbm that are not present in
the target function. To forestall such spurious interactions, check for strong correlations among variables before
fitting gbm.
"""
if gbm.max_depth < 2:
raise Exception("gbm.max_depth must be at least 2.")
check_args_contd(array_or_frame, indices_or_columns)
arr, model_inds = get_arr_and_model_inds(array_or_frame, indices_or_columns)
width = arr.shape[1]
f_vals = {}
for n in [2, 1]:
for inds in itertools.combinations(range(width), n):
f_vals[inds] = compute_f_vals(gbm, model_inds, arr, inds)
h_vals = {}
for inds in itertools.combinations(range(width), 2):
h_vals[inds] = compute_h_val(f_vals, arr, inds)
if indices_or_columns != 'all':
h_vals = {tuple(model_inds[(inds,)]): h_vals[inds] for inds in h_vals.keys()}
if not isinstance(array_or_frame, np.ndarray):
all_cols = array_or_frame.columns.values
h_vals = {tuple(all_cols[(inds,)]): h_vals[inds] for inds in h_vals.keys()}
return h_vals
|
PURPOSE
Compute Friedman and Popescu's two-variable H statistic, in order to look for an interaction in the passed gradient-
boosting model between each pair of variables represented by the elements of the passed array or frame and specified
by the passed indices or columns.
See Jerome H. Friedman and Bogdan E. Popescu, 2008, "Predictive learning via rule ensembles", Ann. Appl. Stat.
2:916-954, http://projecteuclid.org/download/pdfview_1/euclid.aoas/1223908046, s. 8.1.
ARGUMENTS
gbm should be a scikit-learn gradient-boosting model (instance of sklearn.ensemble.GradientBoostingClassifier or
sklearn.ensemble.GradientBoostingRegressor) that has been fitted to array_or_frame (and a target, not used here).
array_or_frame should be a two-dimensional NumPy array or a pandas data frame (instance of numpy.ndarray or pandas
.DataFrame).
indices_or_columns is optional, with default value 'all'. It should be 'all' or a list of indices of columns of
array_or_frame if array_or_frame is a NumPy array or a list of columns of array_or_frame if array_or_frame is a
pandas data frame. If it is 'all', then all columns of array_or_frame are used.
RETURNS
A dict whose keys are pairs (2-tuples) of indices or columns and whose values are the H statistic of the pairs of
variables or NaN if a computation is spoiled by weak main effects and rounding errors.
H varies from 0 to 1. The larger H, the stronger the evidence for an interaction between a pair of variables.
EXAMPLE
Friedman and Popescu's (2008) formula (44) for every j and k corresponds to
h_all_pairs(F, x)
NOTES
1. Per Friedman and Popescu, only variables with strong main effects should be examined for interactions. Strengths
of main effects are available as gbm.feature_importances_ once gbm has been fitted.
2. Per Friedman and Popescu, collinearity among variables can lead to interactions in gbm that are not present in
the target function. To forestall such spurious interactions, check for strong correlations among variables before
fitting gbm.
|
entailment
|
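The same synthetic setup carries over to the pairwise variant; a minimal sketch:

import numpy as np
from sklearn.ensemble import GradientBoostingRegressor

rng = np.random.RandomState(0)
X = rng.uniform(size=(500, 3))
y = X[:, 0] * X[:, 1] + X[:, 2]

gbm = GradientBoostingRegressor(max_depth=2, random_state=0).fit(X, y)

# Keys are index pairs such as (0, 1); here the (0, 1) entry should dominate.
for pair, h_val in h_all_pairs(gbm, X).items():
    print(pair, h_val)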
def get(self, q, limit=None):
"""
Performs a search against the predict endpoint
:param q: query to be searched for [STRING]
:return: { score: [0|1] }
"""
uri = '{}/predict?q={}'.format(self.client.remote, q)
self.logger.debug(uri)
body = self.client.get(uri)
return body['score']
|
Performs a search against the predict endpoint
:param q: query to be searched for [STRING]
:return: { score: [0|1] }
|
entailment
|
def exists(self, bucket, label):
'''Whether a given bucket:label object already exists.'''
fn = self._zf(bucket, label)
try:
self.z.getinfo(fn)
return True
except KeyError:
return False
|
Whether a given bucket:label object already exists.
|
entailment
|
def list_labels(self, bucket):
'''List labels for the given bucket. Due to zipfiles' inherent arbitrary ordering,
this is an expensive operation, as it walks the entire archive searching for individual
'buckets'
:param bucket: bucket to list labels for.
:return: iterator for the labels in the specified bucket.
'''
for name in self.z.namelist():
container, label = self._nf(name.encode("utf-8"))
if container == bucket and label != MD_FILE:
yield label
|
List labels for the given bucket. Due to zipfiles' inherent arbitrary ordering,
this is an expensive operation, as it walks the entire archive searching for individual
'buckets'
:param bucket: bucket to list labels for.
:return: iterator for the labels in the specified bucket.
|
entailment
|
def list_buckets(self):
'''List all buckets managed by this OFS instance. Like list_labels, this also
walks the entire archive, yielding the bucketnames. A local set is retained so that
duplicates aren't returned so this will temporarily pull the entire list into memory
even though this is a generator and will slow as more buckets are added to the set.
:return: iterator for the buckets.
'''
buckets = set()
for name in self.z.namelist():
bucket, _ = self._nf(name)
if bucket not in buckets:
buckets.add(bucket)
yield bucket
|
List all buckets managed by this OFS instance. Like list_labels, this also
walks the entire archive, yielding the bucketnames. A local set is retained so that
duplicates aren't returned so this will temporarily pull the entire list into memory
even though this is a generator and will slow as more buckets are added to the set.
:return: iterator for the buckets.
|
entailment
|
def get_stream(self, bucket, label, as_stream=True):
'''Get a bitstream for the given bucket:label combination.
:param bucket: the bucket to use.
:return: bitstream as a file-like object
'''
if self.mode == "w":
raise OFSException("Cannot read from archive in 'w' mode")
elif self.exists(bucket, label):
fn = self._zf(bucket, label)
if as_stream:
return self.z.open(fn)
else:
return self.z.read(fn)
else:
raise OFSFileNotFound
|
Get a bitstream for the given bucket:label combination.
:param bucket: the bucket to use.
:return: bitstream as a file-like object
|
entailment
|
def get_url(self, bucket, label):
'''Get a URL that should point at the bucket:labelled resource. Aimed to aid web apps by allowing them to redirect to an open resource, rather than proxy the bitstream.
:param bucket: the bucket to use.
:param label: the label of the resource to get
:return: a string URI - eg 'zip:file:///home/.../foo.zip!/bucket/label'
'''
if self.exists(bucket, label):
root = "zip:file//%s" % os.path.abspath(self.zipfile)
fn = self._zf(bucket, label)
return "!/".join(root, fn)
else:
raise OFSFileNotFound
|
Get a URL that should point at the bucket:labelled resource. Aimed to aid web apps by allowing them to redirect to an open resource, rather than proxy the bitstream.
:param bucket: the bucket to use.
:param label: the label of the resource to get
:return: a string URI - eg 'zip:file:///home/.../foo.zip!/bucket/label'
|
entailment
|
def put_stream(self, bucket, label, stream_object, params=None, replace=True, add_md=True):
'''Put a bitstream (stream_object) for the specified bucket:label identifier.
:param bucket: as standard
:param label: as standard
:param stream_object: file-like object to read from or bytestring.
:param params: update metadata with these params (see `update_metadata`)
'''
if self.mode == "r":
raise OFSException("Cannot write into archive in 'r' mode")
else:
params = params or {}
fn = self._zf(bucket, label)
params['_creation_date'] = datetime.now().isoformat().split(".")[0] ## '2010-07-08T19:56:47'
params['_label'] = label
if self.exists(bucket, label) and replace==True:
# Add then Replace? Let's see if that works...
#z = ZipFile(self.zipfile, self.mode, self.compression, self.allowZip64)
zinfo = self.z.getinfo(fn)
size, chksum = self._write(self.z, bucket, label, stream_object)
self._del_stream(zinfo)
#z.close()
params['_content_length'] = size
if chksum:
params['_checksum'] = chksum
else:
#z = ZipFile(self.zipfile, self.mode, self.compression, self.allowZip64)
size, chksum = self._write(self.z, bucket, label, stream_object)
#z.close()
params['_content_length'] = size
if chksum:
params['_checksum'] = chksum
if add_md:
params = self.update_metadata(bucket, label, params)
return params
|
Put a bitstream (stream_object) for the specified bucket:label identifier.
:param bucket: as standard
:param label: as standard
:param stream_object: file-like object to read from or bytestring.
:param params: update metadata with these params (see `update_metadata`)
|
entailment
|
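A short round-trip sketch of this zip-backed store. The class name, import path and constructor arguments below are assumptions inferred from the methods shown, not confirmed by this snippet:

from ofs.local.zipstore import ZOFS   # hypothetical import path

store = ZOFS('archive.zip', mode='a')   # constructor signature assumed
meta = store.put_stream('mybucket', 'hello.txt', b'hello world',
                        params={'author': 'alice'})
print(meta['_content_length'], meta.get('_checksum'))

print(store.exists('mybucket', 'hello.txt'))                       # True
print(store.get_stream('mybucket', 'hello.txt', as_stream=False))  # b'hello world'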
def del_stream(self, bucket, label):
'''Delete a bitstream. This needs more testing - file deletion in a zipfile
is problematic. Alternate method is to create second zipfile without the files
in question, which is not a nice method for large zip archives.
'''
if self.exists(bucket, label):
name = self._zf(bucket, label)
#z = ZipFile(self.zipfile, self.mode, self.compression, self.allowZip64)
self._del_stream(name)
|
Delete a bitstream. This needs more testing - file deletion in a zipfile
is problematic. Alternate method is to create second zipfile without the files
in question, which is not a nice method for large zip archives.
|
entailment
|
def get_metadata(self, bucket, label):
'''Get the metadata for this bucket:label identifier.
'''
if self.mode !="w":
try:
jsn = self._get_bucket_md(bucket)
except OFSFileNotFound:
# No MD found...
return {}
except OFSException as e:
raise OFSException(e)
if label in jsn:
return jsn[label]
else:
return {}
else:
raise OFSException("Cannot read md from archive in 'w' mode")
|
Get the metadata for this bucket:label identifier.
|
entailment
|
def update_metadata(self, bucket, label, params):
'''Update the metadata with the provided dictionary of params.
:param params: dictionary of key values (json serializable).
'''
if self.mode !="r":
try:
payload = self._get_bucket_md(bucket)
except OFSFileNotFound:
# No MD found... create it
payload = {}
for l in self.list_labels(bucket):
payload[l] = {}
payload[l]['_label'] = l
if not self.quiet:
print("Had to create md file for %s" % bucket)
except OFSException as e:
raise OFSException(e)
if label not in payload:
payload[label] = {}
payload[label].update(params)
self.put_stream(bucket, MD_FILE, json.dumps(payload).encode('utf-8'), params={}, replace=True, add_md=False)
return payload[label]
else:
raise OFSException("Cannot update MD in archive in 'r' mode")
|
Update the metadata with the provided dictionary of params.
:param params: dictionary of key values (json serializable).
|
entailment
|
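Per-bucket metadata lives in a JSON sidecar (MD_FILE) inside the archive; a sketch of updating it and reading it back, reusing the hypothetical ZOFS class from the earlier example:

from ofs.local.zipstore import ZOFS   # hypothetical import path

store = ZOFS('archive.zip', mode='a')
store.put_stream('mybucket', 'hello.txt', b'hello world')
store.update_metadata('mybucket', 'hello.txt', {'mimetype': 'text/plain'})
print(store.get_metadata('mybucket', 'hello.txt'))
# e.g. {'_label': 'hello.txt', '_creation_date': '...', 'mimetype': 'text/plain', ...}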
def del_metadata_keys(self, bucket, label, keys):
'''Delete the metadata corresponding to the specified keys.
'''
if self.mode !="r":
try:
payload = self._get_bucket_md(bucket)
except OFSFileNotFound:
# No MD found...
raise OFSFileNotFound("Couldn't find a md file for %s bucket" % bucket)
except OFSException as e:
raise OFSException(e)
if label in payload:
for key in [x for x in keys if x in payload[label]]:
del payload[label][key]
self.put_stream(bucket, MD_FILE, json.dumps(payload).encode('utf-8'), params={}, replace=True, add_md=False)
else:
raise OFSException("Cannot update MD in archive in 'r' mode")
|
Delete the metadata corresponding to the specified keys.
|
entailment
|
def get_response(wsgi_request):
'''
Given a WSGI request, makes a call to a corresponding view
function and returns the response.
'''
service_start_time = datetime.now()
# Get the view / handler for this request
view, args, kwargs = resolve(wsgi_request.path_info)
kwargs.update({"request": wsgi_request})
# Let the view do his task.
try:
resp = view(*args, **kwargs)
except Exception as exc:
resp = HttpResponseServerError(content=str(exc))
headers = dict(resp._headers.values())
# Convert HTTP response into simple dict type.
d_resp = {"status_code": resp.status_code, "reason_phrase": resp.reason_phrase,
"headers": headers}
try:
d_resp.update({"body": resp.content})
except ContentNotRenderedError:
resp.render()
d_resp.update({"body": resp.content})
# Check if we need to send across the duration header.
if _settings.ADD_DURATION_HEADER:
d_resp['headers'].update({_settings.DURATION_HEADER_NAME: (datetime.now() - service_start_time).seconds})
return d_resp
|
Given a WSGI request, makes a call to a corresponding view
function and returns the response.
|
entailment
|
def get_wsgi_requests(request):
'''
For the given batch request, extract the individual requests and create
WSGIRequest object for each.
'''
valid_http_methods = ["get", "post", "put", "patch", "delete", "head", "options", "connect", "trace"]
requests = json.loads(request.body)
if type(requests) not in (list, tuple):
raise BadBatchRequest("The body of batch request should always be list!")
# Max limit check.
no_requests = len(requests)
if no_requests > _settings.MAX_LIMIT:
raise BadBatchRequest("You can batch maximum of %d requests." % (_settings.MAX_LIMIT))
# We could mutate the current request with the respective parameters, but mutation is hard to reason about,
# so let's avoid it. Construct a new WSGIRequest object for each request instead.
def construct_wsgi_from_data(data):
'''
Given the data in the format of url, method, body and headers, construct a new
WSGIRequest object.
'''
url = data.get("url", None)
method = data.get("method", None)
if url is None or method is None:
raise BadBatchRequest("Request definition should have url, method defined.")
if method.lower() not in valid_http_methods:
raise BadBatchRequest("Invalid request method.")
body = data.get("body", "")
headers = data.get("headers", {})
return get_wsgi_request_object(request, method, url, headers, body)
return [construct_wsgi_from_data(data) for data in requests]
|
For the given batch request, extract the individual requests and create
WSGIRequest object for each.
|
entailment
|
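For reference, the body this function parses is a JSON list of request definitions, each with url and method plus optional body and headers. A client-side sketch (the /api/batch endpoint is illustrative):

import json
import requests

batch = [
    {"url": "/users/1/", "method": "get"},
    {"url": "/orders/", "method": "post",
     "body": json.dumps({"item": 42}),
     "headers": {"Content-Type": "application/json"}},
]
resp = requests.post("http://localhost:8000/api/batch",
                     data=json.dumps(batch),
                     headers={"Content-Type": "application/json"})
print(resp.json())   # one response dict per sub-request, in order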
def handle_batch_requests(request, *args, **kwargs):
'''
A view function to handle the overall processing of batch requests.
'''
batch_start_time = datetime.now()
try:
# Get the Individual WSGI requests.
wsgi_requests = get_wsgi_requests(request)
except BadBatchRequest as brx:
return HttpResponseBadRequest(content=str(brx))
# Fire these WSGI requests, and collect the response for the same.
response = execute_requests(wsgi_requests)
# Everything's done, return the response.
resp = HttpResponse(
content=json.dumps(response), content_type="application/json")
if _settings.ADD_DURATION_HEADER:
resp.__setitem__(_settings.DURATION_HEADER_NAME, str((datetime.now() - batch_start_time).seconds))
return resp
|
A view function to handle the overall processing of batch requests.
|
entailment
|
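Exposing the view is a single URL pattern; a sketch assuming a pre-4.0 Django project (the module path and route are assumptions):

from django.conf.urls import url
from batch_requests.views import handle_batch_requests   # assumed module path

urlpatterns = [
    url(r'^api/batch/$', handle_batch_requests, name='batch-requests'),
]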
def search(self, keyword, types=[], terr=KKBOXTerritory.TAIWAN):
'''
Searches within KKBOX's database.
:param keyword: the keyword.
:type keyword: str
:param types: the search types.
:type types: list
:param terr: the current territory.
:return: API response.
:rtype: dict
See `https://docs-en.kkbox.codes/v1.1/reference#search_1`.
'''
url = 'https://api.kkbox.com/v1.1/search'
url += '?' + url_parse.urlencode({'q': keyword, 'territory': terr})
if len(types) > 0:
url += '&type=' + ','.join(types)
return self.http._post_data(url, None, self.http._headers_with_access_token())
|
Searches within KKBOX's database.
:param keyword: the keyword.
:type keyword: str
:param types: the search types.
:type types: list
:param terr: the current territory.
:return: API response.
:rtype: dict
See `https://docs-en.kkbox.codes/v1.1/reference#search_1`.
|
entailment
|
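A hedged usage sketch; the SDK entry points and fetcher attribute below are assumptions about the surrounding package, not taken from this snippet:

from kkbox_developer_sdk.auth_flow import KKBOXOAuth   # assumed import path
from kkbox_developer_sdk.api import KKBOXAPI           # assumed import path

auth = KKBOXOAuth('CLIENT_ID', 'CLIENT_SECRET')
token = auth.fetch_access_token_by_client_credentials()
kkbox = KKBOXAPI(token)

# Restrict the search to tracks and albums; territory defaults to Taiwan.
result = kkbox.search_fetcher.search('love', types=['track', 'album'])
print(result['tracks']['data'][0]['name'])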
def save(self):
"""
IPAddress can only change its PTR record. Saves the current state, PUT /ip_address/uuid.
"""
body = {'ip_address': {'ptr_record': self.ptr_record}}
data = self.cloud_manager.request('PUT', '/ip_address/' + self.address, body)
self._reset(**data['ip_address'])
|
IPAddress can only change its PTR record. Saves the current state, PUT /ip_address/uuid.
|
entailment
|
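A small sketch of the one mutation this object supports, changing the PTR record. The get_ip() helper name is an assumption about the manager API:

import upcloud_api

manager = upcloud_api.CloudManager('username', 'password')
ip = manager.get_ip('198.51.100.10')      # assumed helper returning an IPAddress
ip.ptr_record = 'server1.example.com'
ip.save()                                 # PUT /ip_address/198.51.100.10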
def _create_ip_address_objs(ip_addresses, cloud_manager):
"""
Create IPAddress objects from API response data.
Also associates CloudManager with the objects.
"""
# ip-addresses might be provided as a flat array or as a following dict:
# {'ip_addresses': {'ip_address': [...]}} || {'ip_address': [...]}
if 'ip_addresses' in ip_addresses:
ip_addresses = ip_addresses['ip_addresses']
if 'ip_address' in ip_addresses:
ip_addresses = ip_addresses['ip_address']
return [
IPAddress(cloud_manager=cloud_manager, **ip_addr)
for ip_addr in ip_addresses
]
|
Create IPAddress objects from API response data.
Also associates CloudManager with the objects.
|
entailment
|
def _reset(self, **kwargs):
"""
Reset the objects attributes.
Accepts servers as either unflattened or flattened UUID strings or Server objects.
"""
super(Tag, self)._reset(**kwargs)
# backup name for changing it (look: Tag.save)
self._api_name = self.name
# flatten { servers: { server: [] } }
if 'server' in self.servers:
self.servers = kwargs['servers']['server']
# convert UUIDs into server objects
if self.servers and isinstance(self.servers[0], six.string_types):
self.servers = [Server(uuid=server, populated=False) for server in self.servers]
|
Reset the objects attributes.
Accepts servers as either unflattened or flattened UUID strings or Server objects.
|
entailment
|
def _get(self, uri, params={}):
"""
HTTP GET function
:param uri: REST endpoint
:param params: optional HTTP params to pass to the endpoint
:return: list of results (usually a list of dicts)
Example:
ret = cli.get('/search', params={ 'q': 'example.org' })
"""
if not uri.startswith(self.remote):
uri = '{}{}'.format(self.remote, uri)
return self._make_request(uri, params)
|
HTTP GET function
:param uri: REST endpoint
:param params: optional HTTP params to pass to the endpoint
:return: list of results (usually a list of dicts)
Example:
ret = cli.get('/search', params={ 'q': 'example.org' })
|
entailment
|
def _post(self, uri, data):
"""
HTTP POST function
:param uri: REST endpoint to POST to
:param data: list of dicts to be passed to the endpoint
:return: list of dicts, usually will be a list of objects or id's
Example:
ret = cli.post('/indicators', { 'indicator': 'example.com' })
"""
if not uri.startswith(self.remote):
uri = '{}/{}'.format(self.remote, uri)
self.logger.debug(uri)
return self._make_request(uri, data=data)
|
HTTP POST function
:param uri: REST endpoint to POST to
:param data: list of dicts to be passed to the endpoint
:return: list of dicts, usually will be a list of objects or id's
Example:
ret = cli.post('/indicators', { 'indicator': 'example.com' })
|
entailment
|
def get_servers(self, populate=False, tags_has_one=None, tags_has_all=None):
"""
Return a list of (populated or unpopulated) Server instances.
- populate = False (default) => 1 API request, returns unpopulated Server instances.
- populate = True => Does 1 + n API requests (n = # of servers),
returns populated Server instances.
New in 0.3.0: the list can be filtered with tags:
- tags_has_one: list of Tag objects or strings
returns servers that have at least one of the given tags
- tags_has_all: list of Tag objects or strings
returns servers that have all of the tags
"""
if tags_has_all and tags_has_one:
raise Exception('only one of (tags_has_all, tags_has_one) is allowed.')
request = '/server'
if tags_has_all:
tags_has_all = [str(tag) for tag in tags_has_all]
taglist = ':'.join(tags_has_all)
request = '/server/tag/{0}'.format(taglist)
if tags_has_one:
tags_has_one = [str(tag) for tag in tags_has_one]
taglist = ','.join(tags_has_one)
request = '/server/tag/{0}'.format(taglist)
servers = self.get_request(request)['servers']['server']
server_list = list()
for server in servers:
server_list.append(Server(server, cloud_manager=self))
if populate:
for server_instance in server_list:
server_instance.populate()
return server_list
|
Return a list of (populated or unpopulated) Server instances.
- populate = False (default) => 1 API request, returns unpopulated Server instances.
- populate = True => Does 1 + n API requests (n = # of servers),
returns populated Server instances.
New in 0.3.0: the list can be filtered with tags:
- tags_has_one: list of Tag objects or strings
returns servers that have at least one of the given tags
- tags_has_all: list of Tag objects or strings
returns servers that have all of the tags
|
entailment
|
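A sketch of the tag filtering described above (credentials and tag names are illustrative):

import upcloud_api

manager = upcloud_api.CloudManager('username', 'password')

all_servers = manager.get_servers()                           # 1 API call
web_or_db = manager.get_servers(tags_has_one=['web', 'db'])   # at least one tag
web_and_db = manager.get_servers(tags_has_all=['web', 'db'])  # all of the tags

# populate=True additionally fetches full details for each server (1 + n calls).
detailed = manager.get_servers(populate=True)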
def get_server(self, UUID):
"""
Return a (populated) Server instance.
"""
server, IPAddresses, storages = self.get_server_data(UUID)
return Server(
server,
ip_addresses=IPAddresses,
storage_devices=storages,
populated=True,
cloud_manager=self
)
|
Return a (populated) Server instance.
|
entailment
|
def get_server_by_ip(self, ip_address):
"""
Return a (populated) Server instance by its IP.
Uses GET '/ip_address/x.x.x.x' to retrieve machine UUID using IP-address.
"""
data = self.get_request('/ip_address/{0}'.format(ip_address))
UUID = data['ip_address']['server']
return self.get_server(UUID)
|
Return a (populated) Server instance by its IP.
Uses GET '/ip_address/x.x.x.x' to retrieve machine UUID using IP-address.
|
entailment
|